diff -Nru clamav-1.0.1+dfsg/Cargo.lock clamav-1.0.2+dfsg/Cargo.lock --- clamav-1.0.1+dfsg/Cargo.lock 2023-02-13 06:00:35.000000000 +0000 +++ clamav-1.0.2+dfsg/Cargo.lock 2023-08-15 22:24:07.000000000 +0000 @@ -9,15 +9,6 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "aho-corasick" -version = "0.7.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" -dependencies = [ - "memchr", -] - -[[package]] name = "ansi_term" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -45,24 +36,24 @@ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ "bitflags", "cexpr", "clang-sys", - "clap", - "env_logger", "lazy_static", "lazycell", "log", "peeking_take_while", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "syn 2.0.15", "which", ] @@ -119,7 +110,7 @@ "quote", "serde", "serde_json", - "syn", + "syn 1.0.107", "tempfile", "toml", ] @@ -284,19 +275,6 @@ checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] name = "exr" version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -444,12 +422,6 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] -name = "humantime" -version = "2.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] name = "image" version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -691,7 +663,7 @@ dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -707,6 +679,16 @@ ] [[package]] +name = "prettyplease" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +dependencies = [ + "proc-macro2", + "syn 2.0.15", +] + +[[package]] name = "primal-check" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -717,18 +699,18 @@ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -770,8 +752,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ - "aho-corasick", - "memchr", "regex-syntax", ] @@ -855,7 +835,7 @@ dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -931,6 +911,17 @@ ] [[package]] +name = "syn" +version = "2.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] name = "tempfile" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -945,15 +936,6 @@ ] [[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -979,7 +961,7 @@ dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -1084,7 +1066,7 @@ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.107", "wasm-bindgen-shared", ] @@ -1106,7 +1088,7 @@ dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -1151,15 +1133,6 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" diff -Nru clamav-1.0.1+dfsg/clamsubmit/CMakeLists.txt clamav-1.0.2+dfsg/clamsubmit/CMakeLists.txt --- clamav-1.0.1+dfsg/clamsubmit/CMakeLists.txt 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/clamsubmit/CMakeLists.txt 2023-08-15 22:24:07.000000000 +0000 @@ -34,6 +34,8 @@ PRIVATE ClamAV::libclamav ClamAV::common + OpenSSL::SSL + OpenSSL::Crypto JSONC::jsonc CURL::libcurl ) if(APPLE) diff -Nru clamav-1.0.1+dfsg/cmake/FindRust.cmake clamav-1.0.2+dfsg/cmake/FindRust.cmake --- clamav-1.0.1+dfsg/cmake/FindRust.cmake 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/cmake/FindRust.cmake 2023-08-15 22:24:07.000000000 +0000 
@@ -294,6 +294,21 @@ WORKING_DIRECTORY "${ARGS_SOURCE_DIRECTORY}" DEPENDS ${LIB_SOURCES} COMMENT "Building ${ARGS_TARGET} in ${ARGS_BINARY_DIRECTORY} with: ${cargo_EXECUTABLE} ${MY_CARGO_ARGS_STRING}") + elseif("${CMAKE_OSX_ARCHITECTURES}" MATCHES "^(arm64)$") + add_custom_command( + OUTPUT "${OUTPUT}" + COMMAND ${CMAKE_COMMAND} -E env "CARGO_CMD=build" "CARGO_TARGET_DIR=${ARGS_BINARY_DIRECTORY}" "MAINTAINER_MODE=${MAINTAINER_MODE}" "RUSTFLAGS=${RUSTFLAGS}" ${cargo_EXECUTABLE} ${MY_CARGO_ARGS} --target=aarch64-apple-darwin + WORKING_DIRECTORY "${ARGS_SOURCE_DIRECTORY}" + DEPENDS ${LIB_SOURCES} + COMMENT "Building ${ARGS_TARGET} in ${ARGS_BINARY_DIRECTORY} with: ${cargo_EXECUTABLE} ${MY_CARGO_ARGS_STRING}") + elseif("${CMAKE_OSX_ARCHITECTURES}" MATCHES "^(x86_64)$") + add_custom_command( + OUTPUT "${OUTPUT}" + COMMAND ${CMAKE_COMMAND} -E env "CARGO_CMD=build" "CARGO_TARGET_DIR=${ARGS_BINARY_DIRECTORY}" "MAINTAINER_MODE=${MAINTAINER_MODE}" "RUSTFLAGS=${RUSTFLAGS}" ${cargo_EXECUTABLE} ${MY_CARGO_ARGS} --target=x86_64-apple-darwin + COMMAND ${CMAKE_COMMAND} -E make_directory "${ARGS_BINARY_DIRECTORY}/${RUST_COMPILER_TARGET}/${CARGO_BUILD_TYPE}" + WORKING_DIRECTORY "${ARGS_SOURCE_DIRECTORY}" + DEPENDS ${LIB_SOURCES} + COMMENT "Building ${ARGS_TARGET} in ${ARGS_BINARY_DIRECTORY} with: ${cargo_EXECUTABLE} ${MY_CARGO_ARGS_STRING}") else() add_custom_command( OUTPUT "${OUTPUT}" @@ -382,10 +397,17 @@ ${rustc_VERSION} < ${RUSTC_MINIMUM_REQUIRED}") endif() +if(WIN32) + file(TOUCH ${CMAKE_BINARY_DIR}/empty-file) + set(EMPTY_FILE "${CMAKE_BINARY_DIR}/empty-file") +else() + set(EMPTY_FILE "/dev/null") +endif() + # Determine the native libs required to link w/ rust static libs -# message(STATUS "Detecting native static libs for rust: ${rustc_EXECUTABLE} --crate-type staticlib --print=native-static-libs /dev/null") +# message(STATUS "Detecting native static libs for rust: ${rustc_EXECUTABLE} --crate-type staticlib --print=native-static-libs ${EMPTY_FILE}") execute_process( - 
COMMAND ${CMAKE_COMMAND} -E env "CARGO_TARGET_DIR=${CMAKE_BINARY_DIR}" ${rustc_EXECUTABLE} --crate-type staticlib --print=native-static-libs /dev/null + COMMAND ${CMAKE_COMMAND} -E env "CARGO_TARGET_DIR=${CMAKE_BINARY_DIR}" ${rustc_EXECUTABLE} --crate-type staticlib --print=native-static-libs ${EMPTY_FILE} OUTPUT_VARIABLE RUST_NATIVE_STATIC_LIBS_OUTPUT ERROR_VARIABLE RUST_NATIVE_STATIC_LIBS_ERROR RESULT_VARIABLE RUST_NATIVE_STATIC_LIBS_RESULT diff -Nru clamav-1.0.1+dfsg/CMakeLists.txt clamav-1.0.2+dfsg/CMakeLists.txt --- clamav-1.0.1+dfsg/CMakeLists.txt 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/CMakeLists.txt 2023-08-15 22:24:07.000000000 +0000 @@ -22,7 +22,7 @@ set(VERSION_SUFFIX "") project( ClamAV - VERSION "1.0.1" + VERSION "1.0.2" DESCRIPTION "ClamAV open source email, web, and end-point anti-virus toolkit." ) set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) diff -Nru clamav-1.0.1+dfsg/debian/changelog clamav-1.0.2+dfsg/debian/changelog --- clamav-1.0.1+dfsg/debian/changelog 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/changelog 2023-08-23 14:44:37.000000000 +0000 @@ -1,3 +1,39 @@ +clamav (1.0.2+dfsg-1ubuntu1) mantic; urgency=medium + + [ Marc Deslauriers ] + * Merge with Debian unstable as security update (LP: #2031565). + Remaining changes: + - Extend ifupdown script to support networkd-dispatcher. + + d/clamav-freshclam-ifupdown: Modernize some parts of + the script. Implement support for networkd-dispatcher. + + d/clamav-freshclam.links: Install the + clamav-freshclam-ifupdown script inside the proper + /usr/lib/networkd-dispatcher/{off,routable}.d/ + directories. (LP: 1718227) + - clamav-base.postinst.in: Quell warning from check for clamav user + (LP: 1920217). + - CVE-2023-20197 + - CVE-2023-20212 + + [ Vladimir Petko ] + * d/p/resolve-armhf-ftbfs.patch: resolve armhf failure to build from + source. 
+ + -- Marc Deslauriers Wed, 23 Aug 2023 10:44:37 -0400 + +clamav (1.0.2+dfsg-1) unstable; urgency=medium + + * Import 1.0.2 (Closes: #1050057) + - CVE-2023-20197 (Possible DoS in HFS+ file parser). + - CVE-2023-20212 (Possible DoS in AutoIt file parser). + * Use cmake for xml2 detection (Closes: #949100). + * Replace tomsfastmath with OpenSSL's BN. + * Don't enable clamonacc by default (Closes: #1030171). + * Let the clamav-daemon.socket depend on the service file again + (Closes: #1044136). + + -- Sebastian Andrzej Siewior Sat, 19 Aug 2023 19:07:32 +0200 + clamav (1.0.1+dfsg-2ubuntu1) mantic; urgency=medium * Merge with Debian unstable (LP: #2018063). Remaining changes: diff -Nru clamav-1.0.1+dfsg/debian/control clamav-1.0.2+dfsg/debian/control --- clamav-1.0.1+dfsg/debian/control 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/control 2023-08-21 17:56:36.000000000 +0000 @@ -27,7 +27,6 @@ libpcre2-dev, libssl-dev, libsystemd-dev [linux-any], - libtfm-dev, libxml2-dev, perl:native, pkg-config, @@ -138,7 +137,7 @@ Section: libs Architecture: any Multi-Arch: same -Depends: ${misc:Depends}, ${shlibs:Depends}, libtfm1 (> 0.13.1) +Depends: ${misc:Depends}, ${shlibs:Depends} Suggests: libclamunrar, libclamunrar11 Replaces: libclamav9 Description: anti-virus utility for Unix - library diff -Nru clamav-1.0.1+dfsg/debian/.git-dpm clamav-1.0.2+dfsg/debian/.git-dpm --- clamav-1.0.1+dfsg/debian/.git-dpm 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/.git-dpm 2023-08-19 16:44:39.000000000 +0000 @@ -1,8 +1,8 @@ # see git-dpm(1) from git-dpm package -b1b7438ad627cb935c5e7b9923342bc2e26d4137 -b1b7438ad627cb935c5e7b9923342bc2e26d4137 -b0f7da741add13e9a19254cc0697931223f948eb -b0f7da741add13e9a19254cc0697931223f948eb -clamav_1.0.1.orig.tar.xz -fe18edded75204a2b4b4ec0c73c22da14e5235c2 -14132600 +de9cef7ab6e5a57247f9598340a0e64869429870 +de9cef7ab6e5a57247f9598340a0e64869429870 +7b4b490a9f8c93c9ef66c8d34be648796dd9f7bd 
+7b4b490a9f8c93c9ef66c8d34be648796dd9f7bd +clamav_1.0.2+dfsg.orig.tar.xz +c845d2c777adda943e7421c601924e1bee1864a8 +14134372 diff -Nru clamav-1.0.1+dfsg/debian/libclamav11.symbols clamav-1.0.2+dfsg/debian/libclamav11.symbols --- clamav-1.0.1+dfsg/debian/libclamav11.symbols 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/libclamav11.symbols 2023-08-19 16:44:38.000000000 +0000 @@ -1,25 +1,25 @@ libclamav.so.11 libclamav11 #MINVER# * Build-Depends-Package: libclamav-dev - CLAMAV_PRIVATE@CLAMAV_PRIVATE 1.0.1 + CLAMAV_PRIVATE@CLAMAV_PRIVATE 1.0.2 CLAMAV_PUBLIC@CLAMAV_PUBLIC 1.0.0 - __cli_strcasestr@CLAMAV_PRIVATE 1.0.1 - __cli_strndup@CLAMAV_PRIVATE 1.0.1 - __cli_strnlen@CLAMAV_PRIVATE 1.0.1 - __cli_strnstr@CLAMAV_PRIVATE 1.0.1 - base64Flush@CLAMAV_PRIVATE 1.0.1 - blobAddData@CLAMAV_PRIVATE 1.0.1 - blobCreate@CLAMAV_PRIVATE 1.0.1 - blobDestroy@CLAMAV_PRIVATE 1.0.1 - cl_ASN1_GetTimeT@CLAMAV_PRIVATE 1.0.1 + __cli_strcasestr@CLAMAV_PRIVATE 1.0.2 + __cli_strndup@CLAMAV_PRIVATE 1.0.2 + __cli_strnlen@CLAMAV_PRIVATE 1.0.2 + __cli_strnstr@CLAMAV_PRIVATE 1.0.2 + base64Flush@CLAMAV_PRIVATE 1.0.2 + blobAddData@CLAMAV_PRIVATE 1.0.2 + blobCreate@CLAMAV_PRIVATE 1.0.2 + blobDestroy@CLAMAV_PRIVATE 1.0.2 + cl_ASN1_GetTimeT@CLAMAV_PRIVATE 1.0.2 cl_always_gen_section_hash@CLAMAV_PUBLIC 1.0.0 - cl_base64_decode@CLAMAV_PRIVATE 1.0.1 - cl_base64_encode@CLAMAV_PRIVATE 1.0.1 - cl_cleanup_crypto@CLAMAV_PRIVATE 1.0.1 + cl_base64_decode@CLAMAV_PRIVATE 1.0.2 + cl_base64_encode@CLAMAV_PRIVATE 1.0.2 + cl_cleanup_crypto@CLAMAV_PRIVATE 1.0.2 cl_countsigs@CLAMAV_PUBLIC 1.0.0 cl_cvdfree@CLAMAV_PUBLIC 1.0.0 cl_cvdhead@CLAMAV_PUBLIC 1.0.0 cl_cvdparse@CLAMAV_PUBLIC 1.0.0 - cl_cvdunpack@CLAMAV_PRIVATE 1.0.1 + cl_cvdunpack@CLAMAV_PRIVATE 1.0.2 cl_cvdverify@CLAMAV_PUBLIC 1.0.0 cl_debug@CLAMAV_PUBLIC 1.0.0 cl_engine_addref@CLAMAV_PUBLIC 1.0.0 @@ -28,7 +28,7 @@ cl_engine_get_num@CLAMAV_PUBLIC 1.0.0 cl_engine_get_str@CLAMAV_PUBLIC 1.0.0 cl_engine_new@CLAMAV_PUBLIC 1.0.0 - 
cl_engine_set_clcb_engine_compile_progress@CLAMAV_PRIVATE 1.0.1 + cl_engine_set_clcb_engine_compile_progress@CLAMAV_PRIVATE 1.0.2 cl_engine_set_clcb_file_inspection@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_file_props@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_hash@CLAMAV_PUBLIC 1.0.0 @@ -37,7 +37,7 @@ cl_engine_set_clcb_pre_cache@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_pre_scan@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_sigload@CLAMAV_PUBLIC 1.0.0 - cl_engine_set_clcb_sigload_progress@CLAMAV_PRIVATE 1.0.1 + cl_engine_set_clcb_sigload_progress@CLAMAV_PRIVATE 1.0.2 cl_engine_set_clcb_stats_add_sample@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_stats_decrement_count@CLAMAV_PUBLIC 1.0.0 cl_engine_set_clcb_stats_flush@CLAMAV_PUBLIC 1.0.0 @@ -58,19 +58,19 @@ cl_fmap_close@CLAMAV_PUBLIC 1.0.0 cl_fmap_open_handle@CLAMAV_PUBLIC 1.0.0 cl_fmap_open_memory@CLAMAV_PUBLIC 1.0.0 - cl_get_pkey_file@CLAMAV_PRIVATE 1.0.1 - cl_get_x509_from_mem@CLAMAV_PRIVATE 1.0.1 - cl_hash_data@CLAMAV_PRIVATE 1.0.1 + cl_get_pkey_file@CLAMAV_PRIVATE 1.0.2 + cl_get_x509_from_mem@CLAMAV_PRIVATE 1.0.2 + cl_hash_data@CLAMAV_PRIVATE 1.0.2 cl_hash_destroy@CLAMAV_PUBLIC 1.0.0 - cl_hash_file_fd@CLAMAV_PRIVATE 1.0.1 - cl_hash_file_fd_ctx@CLAMAV_PRIVATE 1.0.1 - cl_hash_file_fp@CLAMAV_PRIVATE 1.0.1 + cl_hash_file_fd@CLAMAV_PRIVATE 1.0.2 + cl_hash_file_fd_ctx@CLAMAV_PRIVATE 1.0.2 + cl_hash_file_fp@CLAMAV_PRIVATE 1.0.2 cl_hash_init@CLAMAV_PUBLIC 1.0.0 cl_init@CLAMAV_PUBLIC 1.0.0 - cl_initialize_crypto@CLAMAV_PRIVATE 1.0.1 + cl_initialize_crypto@CLAMAV_PRIVATE 1.0.2 cl_load@CLAMAV_PUBLIC 1.0.0 - cl_load_cert@CLAMAV_PRIVATE 1.0.1 - cl_load_crl@CLAMAV_PRIVATE 1.0.1 + cl_load_cert@CLAMAV_PRIVATE 1.0.2 + cl_load_crl@CLAMAV_PRIVATE 1.0.2 cl_retdbdir@CLAMAV_PUBLIC 1.0.0 cl_retflevel@CLAMAV_PUBLIC 1.0.1 cl_retver@CLAMAV_PUBLIC 1.0.0 @@ -80,203 +80,203 @@ cl_scanfile_callback@CLAMAV_PUBLIC 1.0.0 cl_scanmap_callback@CLAMAV_PUBLIC 1.0.0 cl_set_clcb_msg@CLAMAV_PUBLIC 1.0.0 - cl_sha1@CLAMAV_PRIVATE 1.0.1 - cl_sha256@CLAMAV_PRIVATE 1.0.1 - 
cl_sign_data@CLAMAV_PRIVATE 1.0.1 - cl_sign_data_keyfile@CLAMAV_PRIVATE 1.0.1 - cl_sign_file_fd@CLAMAV_PRIVATE 1.0.1 - cl_sign_file_fp@CLAMAV_PRIVATE 1.0.1 + cl_sha1@CLAMAV_PRIVATE 1.0.2 + cl_sha256@CLAMAV_PRIVATE 1.0.2 + cl_sign_data@CLAMAV_PRIVATE 1.0.2 + cl_sign_data_keyfile@CLAMAV_PRIVATE 1.0.2 + cl_sign_file_fd@CLAMAV_PRIVATE 1.0.2 + cl_sign_file_fp@CLAMAV_PRIVATE 1.0.2 cl_statchkdir@CLAMAV_PUBLIC 1.0.0 cl_statfree@CLAMAV_PUBLIC 1.0.0 cl_statinidir@CLAMAV_PUBLIC 1.0.0 cl_strerror@CLAMAV_PUBLIC 1.0.0 cl_update_hash@CLAMAV_PUBLIC 1.0.0 - cl_validate_certificate_chain@CLAMAV_PRIVATE 1.0.1 - cl_validate_certificate_chain_ts_dir@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_fd@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_fd_x509@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_fd_x509_keyfile@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_hash@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_hash_x509@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_hash_x509_keyfile@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_x509@CLAMAV_PRIVATE 1.0.1 - cl_verify_signature_x509_keyfile@CLAMAV_PRIVATE 1.0.1 - cli_ac_buildtrie@CLAMAV_PRIVATE 1.0.1 - cli_ac_chklsig@CLAMAV_PRIVATE 1.0.1 - cli_ac_free@CLAMAV_PRIVATE 1.0.1 - cli_ac_freedata@CLAMAV_PRIVATE 1.0.1 - cli_ac_init@CLAMAV_PRIVATE 1.0.1 - cli_ac_initdata@CLAMAV_PRIVATE 1.0.1 - cli_ac_scanbuff@CLAMAV_PRIVATE 1.0.1 - cli_add_content_match_pattern@CLAMAV_PRIVATE 1.0.1 - cli_basename@CLAMAV_PRIVATE 1.0.1 - cli_bm_free@CLAMAV_PRIVATE 1.0.1 - cli_bm_init@CLAMAV_PRIVATE 1.0.1 - cli_bm_scanbuff@CLAMAV_PRIVATE 1.0.1 - cli_build_regex_list@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_alloc@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_destroy@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_getresult_int@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_set_trace@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_setfile@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_setfuncid@CLAMAV_PRIVATE 1.0.1 - 
cli_bytecode_context_setparam_int@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_context_setparam_ptr@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_debug@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_debug_printsrc@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_describe@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_destroy@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_done@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_init@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_load@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_prepare2@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_printversion@CLAMAV_PRIVATE 1.0.1 - cli_bytecode_run@CLAMAV_PRIVATE 1.0.1 - cli_bytefunc_describe@CLAMAV_PRIVATE 1.0.1 - cli_byteinst_describe@CLAMAV_PRIVATE 1.0.1 - cli_bytetype_describe@CLAMAV_PRIVATE 1.0.1 - cli_bytevalue_describe@CLAMAV_PRIVATE 1.0.1 - cli_calloc@CLAMAV_PRIVATE 1.0.1 - cli_check_auth_header@CLAMAV_PRIVATE 1.0.1 - cli_chomp@CLAMAV_PRIVATE 1.0.1 - cli_codepage_to_utf8@CLAMAV_PRIVATE 1.0.1 - cli_ctime@CLAMAV_PRIVATE 1.0.1 - cli_dbgmsg@CLAMAV_PRIVATE 1.0.1 - cli_dbgmsg_no_inline@CLAMAV_PRIVATE 1.0.1 - cli_dconf_init@CLAMAV_PRIVATE 1.0.1 - cli_debug_flag@CLAMAV_PRIVATE 1.0.1 - cli_detect_environment@CLAMAV_PRIVATE 1.0.1 - cli_disasm_one@CLAMAV_PRIVATE 1.0.1 - cli_errmsg@CLAMAV_PRIVATE 1.0.1 - cli_filecopy@CLAMAV_PRIVATE 1.0.1 - cli_free_vba_project@CLAMAV_PRIVATE 1.0.1 - cli_ftw@CLAMAV_PRIVATE 1.0.1 - cli_genhash_pe@CLAMAV_PRIVATE 1.0.1 - cli_gentemp@CLAMAV_PRIVATE 1.0.1 - cli_gentemp_with_prefix@CLAMAV_PRIVATE 1.0.1 - cli_gentempfd@CLAMAV_PRIVATE 1.0.1 - cli_gentempfd_with_prefix@CLAMAV_PRIVATE 1.0.1 - cli_get_debug_flag@CLAMAV_PRIVATE 1.0.1 - cli_get_filepath_from_filedesc@CLAMAV_PRIVATE 1.0.1 - cli_get_last_virus_str@CLAMAV_PRIVATE 1.0.1 - cli_getdsig@CLAMAV_PRIVATE 1.0.1 - cli_gettmpdir@CLAMAV_PRIVATE 1.0.1 - cli_hashfile@CLAMAV_PRIVATE 1.0.1 - cli_hashset_destroy@CLAMAV_PRIVATE 1.0.1 - cli_hashstream@CLAMAV_PRIVATE 1.0.1 - cli_hex2str@CLAMAV_PRIVATE 1.0.1 - cli_hex2ui@CLAMAV_PRIVATE 1.0.1 - cli_infomsg_simple@CLAMAV_PRIVATE 1.0.1 - cli_initroots@CLAMAV_PRIVATE 1.0.1 - 
cli_isnumber@CLAMAV_PRIVATE 1.0.1 - cli_js_destroy@CLAMAV_PRIVATE 1.0.1 - cli_js_init@CLAMAV_PRIVATE 1.0.1 - cli_js_output@CLAMAV_PRIVATE 1.0.1 - cli_js_parse_done@CLAMAV_PRIVATE 1.0.1 - cli_js_process_buffer@CLAMAV_PRIVATE 1.0.1 - cli_ldbtokenize@CLAMAV_PRIVATE 1.0.1 - cli_malloc@CLAMAV_PRIVATE 1.0.1 - cli_memstr@CLAMAV_PRIVATE 1.0.1 - cli_ole2_extract@CLAMAV_PRIVATE 1.0.1 - cli_pcre_build@CLAMAV_PRIVATE 1.0.1 - cli_pcre_freeoff@CLAMAV_PRIVATE 1.0.1 - cli_pcre_init@CLAMAV_PRIVATE 1.0.1 - cli_pcre_perf_events_destroy@CLAMAV_PRIVATE 1.0.1 - cli_pcre_perf_print@CLAMAV_PRIVATE 1.0.1 - cli_pcre_recaloff@CLAMAV_PRIVATE 1.0.1 - cli_pcre_scanbuf@CLAMAV_PRIVATE 1.0.1 - cli_ppt_vba_read@CLAMAV_PRIVATE 1.0.1 - cli_printcxxver@CLAMAV_PRIVATE 1.0.1 - cli_readn@CLAMAV_PRIVATE 1.0.1 - cli_realloc@CLAMAV_PRIVATE 1.0.1 - cli_realpath@CLAMAV_PRIVATE 1.0.1 - cli_regcomp@CLAMAV_PRIVATE 1.0.1 - cli_regex2suffix@CLAMAV_PRIVATE 1.0.1 - cli_regexec@CLAMAV_PRIVATE 1.0.1 - cli_regfree@CLAMAV_PRIVATE 1.0.1 - cli_rmdirs@CLAMAV_PRIVATE 1.0.1 - cli_rndnum@CLAMAV_PRIVATE 1.0.1 - cli_sanitize_filepath@CLAMAV_PRIVATE 1.0.1 - cli_scan_buff@CLAMAV_PRIVATE 1.0.1 - cli_scan_fmap@CLAMAV_PRIVATE 1.0.1 - cli_set_debug_flag@CLAMAV_PRIVATE 1.0.1 - cli_sigopts_handler@CLAMAV_PRIVATE 1.0.1 - cli_sigperf_events_destroy@CLAMAV_PRIVATE 1.0.1 - cli_sigperf_print@CLAMAV_PRIVATE 1.0.1 - cli_str2hex@CLAMAV_PRIVATE 1.0.1 - cli_strbcasestr@CLAMAV_PRIVATE 1.0.1 - cli_strdup@CLAMAV_PRIVATE 1.0.1 - cli_strerror@CLAMAV_PRIVATE 1.0.1 - cli_strlcat@CLAMAV_PRIVATE 1.0.1 - cli_strlcpy@CLAMAV_PRIVATE 1.0.1 - cli_strntoul@CLAMAV_PRIVATE 1.0.1 - cli_strrcpy@CLAMAV_PRIVATE 1.0.1 - cli_strtok@CLAMAV_PRIVATE 1.0.1 - cli_strtokbuf@CLAMAV_PRIVATE 1.0.1 - cli_strtokenize@CLAMAV_PRIVATE 1.0.1 - cli_textbuffer_append_normalize@CLAMAV_PRIVATE 1.0.1 - cli_unescape@CLAMAV_PRIVATE 1.0.1 - cli_unlink@CLAMAV_PRIVATE 1.0.1 - cli_url_canon@CLAMAV_PRIVATE 1.0.1 - cli_utf16_to_utf8@CLAMAV_PRIVATE 1.0.1 - cli_utf16toascii@CLAMAV_PRIVATE 1.0.1 - 
cli_vba_inflate@CLAMAV_PRIVATE 1.0.1 - cli_vba_readdir@CLAMAV_PRIVATE 1.0.1 - cli_versig2@CLAMAV_PRIVATE 1.0.1 - cli_versig@CLAMAV_PRIVATE 1.0.1 - cli_warnmsg@CLAMAV_PRIVATE 1.0.1 - cli_wm_decrypt_macro@CLAMAV_PRIVATE 1.0.1 - cli_wm_readdir@CLAMAV_PRIVATE 1.0.1 - cli_writen@CLAMAV_PRIVATE 1.0.1 - decodeLine@CLAMAV_PRIVATE 1.0.1 - disasmbuf@CLAMAV_PRIVATE 1.0.1 - fmap@CLAMAV_PRIVATE 1.0.1 - fmap_dump_to_file@CLAMAV_PRIVATE 1.0.1 - fmap_duplicate@CLAMAV_PRIVATE 1.0.1 - free_duplicate_fmap@CLAMAV_PRIVATE 1.0.1 - get_fpu_endian@CLAMAV_PRIVATE 1.0.1 - have_clamjit@CLAMAV_PRIVATE 1.0.1 - have_rar@CLAMAV_PRIVATE 1.0.1 - html_normalise_map@CLAMAV_PRIVATE 1.0.1 - html_normalise_mem@CLAMAV_PRIVATE 1.0.1 - html_screnc_decode@CLAMAV_PRIVATE 1.0.1 - html_tag_arg_free@CLAMAV_PRIVATE 1.0.1 - init_allow_list@CLAMAV_PRIVATE 1.0.1 - init_domain_list@CLAMAV_PRIVATE 1.0.1 - init_regex_list@CLAMAV_PRIVATE 1.0.1 - is_regex_ok@CLAMAV_PRIVATE 1.0.1 - load_regex_matcher@CLAMAV_PRIVATE 1.0.1 - lsig_increment_subsig_match@CLAMAV_PRIVATE 1.0.1 + cl_validate_certificate_chain@CLAMAV_PRIVATE 1.0.2 + cl_validate_certificate_chain_ts_dir@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_fd@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_fd_x509@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_fd_x509_keyfile@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_hash@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_hash_x509@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_hash_x509_keyfile@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_x509@CLAMAV_PRIVATE 1.0.2 + cl_verify_signature_x509_keyfile@CLAMAV_PRIVATE 1.0.2 + cli_ac_buildtrie@CLAMAV_PRIVATE 1.0.2 + cli_ac_chklsig@CLAMAV_PRIVATE 1.0.2 + cli_ac_free@CLAMAV_PRIVATE 1.0.2 + cli_ac_freedata@CLAMAV_PRIVATE 1.0.2 + cli_ac_init@CLAMAV_PRIVATE 1.0.2 + cli_ac_initdata@CLAMAV_PRIVATE 1.0.2 + cli_ac_scanbuff@CLAMAV_PRIVATE 1.0.2 + cli_add_content_match_pattern@CLAMAV_PRIVATE 1.0.2 + cli_basename@CLAMAV_PRIVATE 1.0.2 + cli_bm_free@CLAMAV_PRIVATE 1.0.2 
+ cli_bm_init@CLAMAV_PRIVATE 1.0.2 + cli_bm_scanbuff@CLAMAV_PRIVATE 1.0.2 + cli_build_regex_list@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_alloc@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_destroy@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_getresult_int@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_set_trace@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_setfile@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_setfuncid@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_setparam_int@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_context_setparam_ptr@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_debug@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_debug_printsrc@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_describe@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_destroy@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_done@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_init@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_load@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_prepare2@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_printversion@CLAMAV_PRIVATE 1.0.2 + cli_bytecode_run@CLAMAV_PRIVATE 1.0.2 + cli_bytefunc_describe@CLAMAV_PRIVATE 1.0.2 + cli_byteinst_describe@CLAMAV_PRIVATE 1.0.2 + cli_bytetype_describe@CLAMAV_PRIVATE 1.0.2 + cli_bytevalue_describe@CLAMAV_PRIVATE 1.0.2 + cli_calloc@CLAMAV_PRIVATE 1.0.2 + cli_check_auth_header@CLAMAV_PRIVATE 1.0.2 + cli_chomp@CLAMAV_PRIVATE 1.0.2 + cli_codepage_to_utf8@CLAMAV_PRIVATE 1.0.2 + cli_ctime@CLAMAV_PRIVATE 1.0.2 + cli_dbgmsg@CLAMAV_PRIVATE 1.0.2 + cli_dbgmsg_no_inline@CLAMAV_PRIVATE 1.0.2 + cli_dconf_init@CLAMAV_PRIVATE 1.0.2 + cli_debug_flag@CLAMAV_PRIVATE 1.0.2 + cli_detect_environment@CLAMAV_PRIVATE 1.0.2 + cli_disasm_one@CLAMAV_PRIVATE 1.0.2 + cli_errmsg@CLAMAV_PRIVATE 1.0.2 + cli_filecopy@CLAMAV_PRIVATE 1.0.2 + cli_free_vba_project@CLAMAV_PRIVATE 1.0.2 + cli_ftw@CLAMAV_PRIVATE 1.0.2 + cli_genhash_pe@CLAMAV_PRIVATE 1.0.2 + cli_gentemp@CLAMAV_PRIVATE 1.0.2 + cli_gentemp_with_prefix@CLAMAV_PRIVATE 1.0.2 + cli_gentempfd@CLAMAV_PRIVATE 1.0.2 + cli_gentempfd_with_prefix@CLAMAV_PRIVATE 1.0.2 + cli_get_debug_flag@CLAMAV_PRIVATE 1.0.2 + 
cli_get_filepath_from_filedesc@CLAMAV_PRIVATE 1.0.2 + cli_get_last_virus_str@CLAMAV_PRIVATE 1.0.2 + cli_getdsig@CLAMAV_PRIVATE 1.0.2 + cli_gettmpdir@CLAMAV_PRIVATE 1.0.2 + cli_hashfile@CLAMAV_PRIVATE 1.0.2 + cli_hashset_destroy@CLAMAV_PRIVATE 1.0.2 + cli_hashstream@CLAMAV_PRIVATE 1.0.2 + cli_hex2str@CLAMAV_PRIVATE 1.0.2 + cli_hex2ui@CLAMAV_PRIVATE 1.0.2 + cli_infomsg_simple@CLAMAV_PRIVATE 1.0.2 + cli_initroots@CLAMAV_PRIVATE 1.0.2 + cli_isnumber@CLAMAV_PRIVATE 1.0.2 + cli_js_destroy@CLAMAV_PRIVATE 1.0.2 + cli_js_init@CLAMAV_PRIVATE 1.0.2 + cli_js_output@CLAMAV_PRIVATE 1.0.2 + cli_js_parse_done@CLAMAV_PRIVATE 1.0.2 + cli_js_process_buffer@CLAMAV_PRIVATE 1.0.2 + cli_ldbtokenize@CLAMAV_PRIVATE 1.0.2 + cli_malloc@CLAMAV_PRIVATE 1.0.2 + cli_memstr@CLAMAV_PRIVATE 1.0.2 + cli_ole2_extract@CLAMAV_PRIVATE 1.0.2 + cli_pcre_build@CLAMAV_PRIVATE 1.0.2 + cli_pcre_freeoff@CLAMAV_PRIVATE 1.0.2 + cli_pcre_init@CLAMAV_PRIVATE 1.0.2 + cli_pcre_perf_events_destroy@CLAMAV_PRIVATE 1.0.2 + cli_pcre_perf_print@CLAMAV_PRIVATE 1.0.2 + cli_pcre_recaloff@CLAMAV_PRIVATE 1.0.2 + cli_pcre_scanbuf@CLAMAV_PRIVATE 1.0.2 + cli_ppt_vba_read@CLAMAV_PRIVATE 1.0.2 + cli_printcxxver@CLAMAV_PRIVATE 1.0.2 + cli_readn@CLAMAV_PRIVATE 1.0.2 + cli_realloc@CLAMAV_PRIVATE 1.0.2 + cli_realpath@CLAMAV_PRIVATE 1.0.2 + cli_regcomp@CLAMAV_PRIVATE 1.0.2 + cli_regex2suffix@CLAMAV_PRIVATE 1.0.2 + cli_regexec@CLAMAV_PRIVATE 1.0.2 + cli_regfree@CLAMAV_PRIVATE 1.0.2 + cli_rmdirs@CLAMAV_PRIVATE 1.0.2 + cli_rndnum@CLAMAV_PRIVATE 1.0.2 + cli_sanitize_filepath@CLAMAV_PRIVATE 1.0.2 + cli_scan_buff@CLAMAV_PRIVATE 1.0.2 + cli_scan_fmap@CLAMAV_PRIVATE 1.0.2 + cli_set_debug_flag@CLAMAV_PRIVATE 1.0.2 + cli_sigopts_handler@CLAMAV_PRIVATE 1.0.2 + cli_sigperf_events_destroy@CLAMAV_PRIVATE 1.0.2 + cli_sigperf_print@CLAMAV_PRIVATE 1.0.2 + cli_str2hex@CLAMAV_PRIVATE 1.0.2 + cli_strbcasestr@CLAMAV_PRIVATE 1.0.2 + cli_strdup@CLAMAV_PRIVATE 1.0.2 + cli_strerror@CLAMAV_PRIVATE 1.0.2 + cli_strlcat@CLAMAV_PRIVATE 1.0.2 + 
cli_strlcpy@CLAMAV_PRIVATE 1.0.2 + cli_strntoul@CLAMAV_PRIVATE 1.0.2 + cli_strrcpy@CLAMAV_PRIVATE 1.0.2 + cli_strtok@CLAMAV_PRIVATE 1.0.2 + cli_strtokbuf@CLAMAV_PRIVATE 1.0.2 + cli_strtokenize@CLAMAV_PRIVATE 1.0.2 + cli_textbuffer_append_normalize@CLAMAV_PRIVATE 1.0.2 + cli_unescape@CLAMAV_PRIVATE 1.0.2 + cli_unlink@CLAMAV_PRIVATE 1.0.2 + cli_url_canon@CLAMAV_PRIVATE 1.0.2 + cli_utf16_to_utf8@CLAMAV_PRIVATE 1.0.2 + cli_utf16toascii@CLAMAV_PRIVATE 1.0.2 + cli_vba_inflate@CLAMAV_PRIVATE 1.0.2 + cli_vba_readdir@CLAMAV_PRIVATE 1.0.2 + cli_versig2@CLAMAV_PRIVATE 1.0.2 + cli_versig@CLAMAV_PRIVATE 1.0.2 + cli_warnmsg@CLAMAV_PRIVATE 1.0.2 + cli_wm_decrypt_macro@CLAMAV_PRIVATE 1.0.2 + cli_wm_readdir@CLAMAV_PRIVATE 1.0.2 + cli_writen@CLAMAV_PRIVATE 1.0.2 + decodeLine@CLAMAV_PRIVATE 1.0.2 + disasmbuf@CLAMAV_PRIVATE 1.0.2 + fmap@CLAMAV_PRIVATE 1.0.2 + fmap_dump_to_file@CLAMAV_PRIVATE 1.0.2 + fmap_duplicate@CLAMAV_PRIVATE 1.0.2 + free_duplicate_fmap@CLAMAV_PRIVATE 1.0.2 + get_fpu_endian@CLAMAV_PRIVATE 1.0.2 + have_clamjit@CLAMAV_PRIVATE 1.0.2 + have_rar@CLAMAV_PRIVATE 1.0.2 + html_normalise_map@CLAMAV_PRIVATE 1.0.2 + html_normalise_mem@CLAMAV_PRIVATE 1.0.2 + html_screnc_decode@CLAMAV_PRIVATE 1.0.2 + html_tag_arg_free@CLAMAV_PRIVATE 1.0.2 + init_allow_list@CLAMAV_PRIVATE 1.0.2 + init_domain_list@CLAMAV_PRIVATE 1.0.2 + init_regex_list@CLAMAV_PRIVATE 1.0.2 + is_regex_ok@CLAMAV_PRIVATE 1.0.2 + load_regex_matcher@CLAMAV_PRIVATE 1.0.2 + lsig_increment_subsig_match@CLAMAV_PRIVATE 1.0.2 lsig_sub_matched@CLAMAV_PUBLIC 1.0.0 - messageCreate@CLAMAV_PRIVATE 1.0.1 - messageDestroy@CLAMAV_PRIVATE 1.0.1 - mpool_calloc@CLAMAV_PRIVATE 1.0.1 - mpool_create@CLAMAV_PRIVATE 1.0.1 - mpool_destroy@CLAMAV_PRIVATE 1.0.1 - mpool_free@CLAMAV_PRIVATE 1.0.1 - mpool_getstats@CLAMAV_PRIVATE 1.0.1 - phishingScan@CLAMAV_PRIVATE 1.0.1 - phishing_done@CLAMAV_PRIVATE 1.0.1 - phishing_init@CLAMAV_PRIVATE 1.0.1 - readdb_parse_ldb_subsignature@CLAMAV_PRIVATE 1.0.1 - regex_list_add_pattern@CLAMAV_PRIVATE 1.0.1 - 
regex_list_done@CLAMAV_PRIVATE 1.0.1 - regex_list_match@CLAMAV_PRIVATE 1.0.1 - tableCreate@CLAMAV_PRIVATE 1.0.1 - tableDestroy@CLAMAV_PRIVATE 1.0.1 - tableFind@CLAMAV_PRIVATE 1.0.1 - tableInsert@CLAMAV_PRIVATE 1.0.1 - tableIterate@CLAMAV_PRIVATE 1.0.1 - tableRemove@CLAMAV_PRIVATE 1.0.1 - tableUpdate@CLAMAV_PRIVATE 1.0.1 - text_normalize_init@CLAMAV_PRIVATE 1.0.1 - text_normalize_map@CLAMAV_PRIVATE 1.0.1 - text_normalize_reset@CLAMAV_PRIVATE 1.0.1 - uniq_add@CLAMAV_PRIVATE 1.0.1 - uniq_free@CLAMAV_PRIVATE 1.0.1 - uniq_get@CLAMAV_PRIVATE 1.0.1 - uniq_init@CLAMAV_PRIVATE 1.0.1 + messageCreate@CLAMAV_PRIVATE 1.0.2 + messageDestroy@CLAMAV_PRIVATE 1.0.2 + mpool_calloc@CLAMAV_PRIVATE 1.0.2 + mpool_create@CLAMAV_PRIVATE 1.0.2 + mpool_destroy@CLAMAV_PRIVATE 1.0.2 + mpool_free@CLAMAV_PRIVATE 1.0.2 + mpool_getstats@CLAMAV_PRIVATE 1.0.2 + phishingScan@CLAMAV_PRIVATE 1.0.2 + phishing_done@CLAMAV_PRIVATE 1.0.2 + phishing_init@CLAMAV_PRIVATE 1.0.2 + readdb_parse_ldb_subsignature@CLAMAV_PRIVATE 1.0.2 + regex_list_add_pattern@CLAMAV_PRIVATE 1.0.2 + regex_list_done@CLAMAV_PRIVATE 1.0.2 + regex_list_match@CLAMAV_PRIVATE 1.0.2 + tableCreate@CLAMAV_PRIVATE 1.0.2 + tableDestroy@CLAMAV_PRIVATE 1.0.2 + tableFind@CLAMAV_PRIVATE 1.0.2 + tableInsert@CLAMAV_PRIVATE 1.0.2 + tableIterate@CLAMAV_PRIVATE 1.0.2 + tableRemove@CLAMAV_PRIVATE 1.0.2 + tableUpdate@CLAMAV_PRIVATE 1.0.2 + text_normalize_init@CLAMAV_PRIVATE 1.0.2 + text_normalize_map@CLAMAV_PRIVATE 1.0.2 + text_normalize_reset@CLAMAV_PRIVATE 1.0.2 + uniq_add@CLAMAV_PRIVATE 1.0.2 + uniq_free@CLAMAV_PRIVATE 1.0.2 + uniq_get@CLAMAV_PRIVATE 1.0.2 + uniq_init@CLAMAV_PRIVATE 1.0.2 libfreshclam.so.2 libclamav11 #MINVER# FRESHCLAM_PRIVATE@FRESHCLAM_PRIVATE 1.0.1 FRESHCLAM_PUBLIC@FRESHCLAM_PUBLIC 1.0.0 diff -Nru clamav-1.0.1+dfsg/debian/patches/Add-an-option-to-avoid-setting-RPATH-on-unix-systems.patch clamav-1.0.2+dfsg/debian/patches/Add-an-option-to-avoid-setting-RPATH-on-unix-systems.patch --- 
clamav-1.0.1+dfsg/debian/patches/Add-an-option-to-avoid-setting-RPATH-on-unix-systems.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/Add-an-option-to-avoid-setting-RPATH-on-unix-systems.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From b1b7438ad627cb935c5e7b9923342bc2e26d4137 Mon Sep 17 00:00:00 2001 +From 9824d59fcb634e349490a49997a294ab6a9f7020 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 6 Jan 2023 23:11:00 +0100 Subject: Add an option to avoid setting RPATH on unix systems. @@ -16,7 +16,7 @@ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt -index a146bc1..ca8fd0c 100644 +index 38a69f3..6bf9620 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -174,13 +174,13 @@ endif() @@ -36,10 +36,10 @@ set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_FULL_LIBDIR}") else() diff --git a/CMakeOptions.cmake b/CMakeOptions.cmake -index 2b377cd..aac5854 100644 +index d995bac..9275352 100644 --- a/CMakeOptions.cmake +++ b/CMakeOptions.cmake -@@ -123,3 +123,6 @@ option(ENABLE_EXTERNAL_TOMFASTMATH +@@ -120,3 +120,6 @@ option(ENABLE_SYSTEMD # Rust Targets: https://doc.rust-lang.org/nightly/rustc/platform-support.html option(RUST_COMPILER_TARGET "Use a custom target triple to build the Rust components. 
Needed for cross-compiling.") diff -Nru clamav-1.0.1+dfsg/debian/patches/Add-a-version-script-for-libclamav-and-libfreshclam.patch clamav-1.0.2+dfsg/debian/patches/Add-a-version-script-for-libclamav-and-libfreshclam.patch --- clamav-1.0.1+dfsg/debian/patches/Add-a-version-script-for-libclamav-and-libfreshclam.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/Add-a-version-script-for-libclamav-and-libfreshclam.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From 6970538d0bd11d97d414a6744a670dbe6d8b8909 Mon Sep 17 00:00:00 2001 +From badbc203114a85e8bdec2deecc25f899331c05f4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 2 Jan 2023 15:51:42 +0100 Subject: Add a version script for libclamav and libfreshclam @@ -20,10 +20,10 @@ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/libclamav/CMakeLists.txt b/libclamav/CMakeLists.txt -index 0d35e3a..61346b9 100644 +index 6bc426f..f0b3fdf 100644 --- a/libclamav/CMakeLists.txt +++ b/libclamav/CMakeLists.txt -@@ -508,7 +508,8 @@ if(ENABLE_SHARED_LIB) +@@ -505,7 +505,8 @@ if(ENABLE_SHARED_LIB) add_library( clamav SHARED ) set_target_properties( clamav PROPERTIES VERSION ${LIBCLAMAV_VERSION} @@ -33,7 +33,7 @@ target_sources( clamav PRIVATE ${LIBCLAMAV_SOURCES} -@@ -554,6 +555,8 @@ if(ENABLE_SHARED_LIB) +@@ -551,6 +552,8 @@ if(ENABLE_SHARED_LIB) Iconv::Iconv ${CMAKE_DL_LIBS} m ) diff -Nru clamav-1.0.1+dfsg/debian/patches/cargo-Remove-windows-referenfes.patch clamav-1.0.2+dfsg/debian/patches/cargo-Remove-windows-referenfes.patch --- clamav-1.0.1+dfsg/debian/patches/cargo-Remove-windows-referenfes.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/cargo-Remove-windows-referenfes.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From 95172b6139277bb52e3970d5d595f12f14fa3efc Mon Sep 17 00:00:00 2001 +From 410b3222b6d7975649468a0470a5dea31476b04e Mon Sep 17 00:00:00 2001 From: Scott Kitterman Date: Sat, 31 Dec 2022 12:12:58 
+0100 Subject: cargo: Remove windows referenfes. @@ -20,8 +20,8 @@ .../.cargo/vendor/remove_dir_all/Cargo.toml | 4 ++-- .../.cargo/vendor/tempfile/.cargo-checksum.json | 2 +- libclamav_rust/.cargo/vendor/tempfile/Cargo.toml | 6 +++--- - .../.cargo/vendor/termcolor/.cargo-checksum.json | 2 +- - libclamav_rust/.cargo/vendor/termcolor/Cargo.toml | 4 ++-- + .../.cargo/vendor/which/.cargo-checksum.json | 2 +- + libclamav_rust/.cargo/vendor/which/Cargo.toml | 4 ++-- 12 files changed, 25 insertions(+), 25 deletions(-) diff --git a/libclamav_rust/.cargo/vendor/ansi_term/.cargo-checksum.json b/libclamav_rust/.cargo/vendor/ansi_term/.cargo-checksum.json @@ -141,23 +141,23 @@ +#[target."cfg(windows)".dependencies.winapi] +#version = "0.3" +#features = ["fileapi", "handleapi", "winbase"] -diff --git a/libclamav_rust/.cargo/vendor/termcolor/.cargo-checksum.json b/libclamav_rust/.cargo/vendor/termcolor/.cargo-checksum.json -index 6032d23..72c84f9 100644 ---- a/libclamav_rust/.cargo/vendor/termcolor/.cargo-checksum.json -+++ b/libclamav_rust/.cargo/vendor/termcolor/.cargo-checksum.json -@@ -1 +1 @@ --{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"2e1ffefd2c70d47b5097d7ecc26184d92e4e2be1174c53147a617729024a4a51","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"cc4c882bde8d2ef26ef4770ff30d60eda603d87ae32e16d99525dc88f3377238","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/lib.rs":"fe62bc640112ffb687366fbe4a084ed3bf749185f77d1e401757ab148313fb7e"},"package":"be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"} -\ No newline at end of file 
-+{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"cc4c882bde8d2ef26ef4770ff30d60eda603d87ae32e16d99525dc88f3377238","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/lib.rs":"fe62bc640112ffb687366fbe4a084ed3bf749185f77d1e401757ab148313fb7e"},"package":"be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"} -diff --git a/libclamav_rust/.cargo/vendor/termcolor/Cargo.toml b/libclamav_rust/.cargo/vendor/termcolor/Cargo.toml -index dbdb6e8..6d7abff 100644 ---- a/libclamav_rust/.cargo/vendor/termcolor/Cargo.toml -+++ b/libclamav_rust/.cargo/vendor/termcolor/Cargo.toml -@@ -36,5 +36,5 @@ bench = false +diff --git a/libclamav_rust/.cargo/vendor/which/.cargo-checksum.json b/libclamav_rust/.cargo/vendor/which/.cargo-checksum.json +index 7b3c3ca..69fea07 100644 +--- a/libclamav_rust/.cargo/vendor/which/.cargo-checksum.json ++++ b/libclamav_rust/.cargo/vendor/which/.cargo-checksum.json +@@ -1 +1 @@ +-{"files":{"Cargo.toml":"52b2c35d7270c5db75872052c2f8e56740f3c4ccf48b4a17be7b600c57bf24a0","LICENSE.txt":"0041560f5d419c30e1594567f3b7ac2bc078ff6a68f437e0348ba85d9cf99112","README.md":"8b16d6a129cb05c3b6ed15e5eacbd7ca488a5005f3d22d3376cc75157996f1dc","src/checker.rs":"e17ca8bcccedfba17ba027e86de970a01d6d207ba442174184952966eeaba140","src/error.rs":"00315874353628366851cd0817a60059cb2c784fd315407a2c30f38021b18dc6","src/finder.rs":"71d09b164ebf51e70dc67b6e4db78bc1c10afedc6473b1edb795d36bd3a3c83b","src/helper.rs":"42cf60a98c017fcbf96d8cbf5880398b4f191c4b2445c43028c35ad57a1b846a","src/lib.rs":"53926af5cadb33966a6d7e0bdd87a48470ac703f144da77212edbedf88bb0692","tests/basic.rs":"90e2c26bc1402fea996e91342f0c299cc91fb54e82445b0bb46715a77660059b"},"package":"2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"} +\ No 
newline at end of file ++{"files":{"LICENSE.txt":"0041560f5d419c30e1594567f3b7ac2bc078ff6a68f437e0348ba85d9cf99112","README.md":"8b16d6a129cb05c3b6ed15e5eacbd7ca488a5005f3d22d3376cc75157996f1dc","src/checker.rs":"e17ca8bcccedfba17ba027e86de970a01d6d207ba442174184952966eeaba140","src/error.rs":"00315874353628366851cd0817a60059cb2c784fd315407a2c30f38021b18dc6","src/finder.rs":"71d09b164ebf51e70dc67b6e4db78bc1c10afedc6473b1edb795d36bd3a3c83b","src/helper.rs":"42cf60a98c017fcbf96d8cbf5880398b4f191c4b2445c43028c35ad57a1b846a","src/lib.rs":"53926af5cadb33966a6d7e0bdd87a48470ac703f144da77212edbedf88bb0692","tests/basic.rs":"90e2c26bc1402fea996e91342f0c299cc91fb54e82445b0bb46715a77660059b"},"package":"2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"} +diff --git a/libclamav_rust/.cargo/vendor/which/Cargo.toml b/libclamav_rust/.cargo/vendor/which/Cargo.toml +index ff6894a..6655f3f 100644 +--- a/libclamav_rust/.cargo/vendor/which/Cargo.toml ++++ b/libclamav_rust/.cargo/vendor/which/Cargo.toml +@@ -46,5 +46,5 @@ optional = true + [dev-dependencies.tempfile] + version = "3.3.0" - [dev-dependencies] - --[target."cfg(windows)".dependencies.winapi-util] --version = "0.1.3" -+#[target."cfg(windows)".dependencies.winapi-util] -+#version = "0.1.3" +-[target."cfg(windows)".dependencies.once_cell] +-version = "1" ++#[target."cfg(windows)".dependencies.once_cell] ++#version = "1" diff -Nru clamav-1.0.1+dfsg/debian/patches/Change-paths-in-sample-conf-file-to-match-Debian.patch clamav-1.0.2+dfsg/debian/patches/Change-paths-in-sample-conf-file-to-match-Debian.patch --- clamav-1.0.1+dfsg/debian/patches/Change-paths-in-sample-conf-file-to-match-Debian.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/Change-paths-in-sample-conf-file-to-match-Debian.patch 2023-08-19 16:44:38.000000000 +0000 @@ -1,4 +1,4 @@ -From feedfa082d0b4107768ecb8fbede3e9488291924 Mon Sep 17 00:00:00 2001 +From 3ba303b9d6e78d5b2b7f28b71efda5e0812dba14 Mon Sep 17 00:00:00 
2001 From: Scott Kitterman Date: Mon, 10 Mar 2014 19:20:18 -0400 Subject: Change paths in sample conf file to match Debian diff -Nru clamav-1.0.1+dfsg/debian/patches/clamd_dont_depend_on_clamav_demon_socket.patch clamav-1.0.2+dfsg/debian/patches/clamd_dont_depend_on_clamav_demon_socket.patch --- clamav-1.0.1+dfsg/debian/patches/clamd_dont_depend_on_clamav_demon_socket.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/clamd_dont_depend_on_clamav_demon_socket.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -From 8a47e2c733b7b5ffda3eac7605ded80e935f510c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 11 Aug 2016 21:54:10 +0200 -Subject: clamd: don't depend on clamav-demon.socket - -Let's try to live without it. -This should avoid the endless loop in #824042. Newer systemd have -rate-limiting on (re)starts. This rate-limiting would stop the socket -service. The only purpose for the socket activation is to get clamd -started after the initial freshclam run on installs so I think we can -live without and manually start the daemon after installation. 
- -Patch-Name: clamd_dont_depend_on_clamav_demon_socket.patch -Signed-off-by: Sebastian Andrzej Siewior ---- - clamd/clamav-daemon.service.in | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/clamd/clamav-daemon.service.in b/clamd/clamav-daemon.service.in -index 579a512..d84a2b0 100644 ---- a/clamd/clamav-daemon.service.in -+++ b/clamd/clamav-daemon.service.in -@@ -1,7 +1,6 @@ - [Unit] - Description=Clam AntiVirus userspace daemon - Documentation=man:clamd(8) man:clamd.conf(5) https://docs.clamav.net/ --Requires=clamav-daemon.socket - # Check for database existence - ConditionPathExistsGlob=@DATADIR@/main.{c[vl]d,inc} - ConditionPathExistsGlob=@DATADIR@/daily.{c[vl]d,inc} -@@ -14,4 +13,3 @@ TimeoutStartSec=420 - - [Install] - WantedBy=multi-user.target --Also=clamav-daemon.socket diff -Nru clamav-1.0.1+dfsg/debian/patches/libclamav-Add-missing-symbols.patch clamav-1.0.2+dfsg/debian/patches/libclamav-Add-missing-symbols.patch --- clamav-1.0.1+dfsg/debian/patches/libclamav-Add-missing-symbols.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/libclamav-Add-missing-symbols.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From a9827003989996fdee7295de7af2c2cb976a00aa Mon Sep 17 00:00:00 2001 +From 79c704216edd15b41173f889d468f2e01a5aeb16 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 2 Jan 2023 16:20:39 +0100 Subject: libclamav: Add missing symbols. 
diff -Nru clamav-1.0.1+dfsg/debian/patches/libclamav-pe-Use-endian-wrapper-in-more-places.patch clamav-1.0.2+dfsg/debian/patches/libclamav-pe-Use-endian-wrapper-in-more-places.patch --- clamav-1.0.1+dfsg/debian/patches/libclamav-pe-Use-endian-wrapper-in-more-places.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/libclamav-pe-Use-endian-wrapper-in-more-places.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From 2abd896f6c7c91cc13f06cb10beedb62380d24d5 Mon Sep 17 00:00:00 2001 +From 47db1ecb0cb356d7f08f515965859221e0dca1af Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 6 Jan 2023 21:42:30 +0100 Subject: libclamav/pe: Use endian wrapper in more places. diff -Nru clamav-1.0.1+dfsg/debian/patches/libclamav-Sort-libclamav.map-and-libfreshclam.map.patch clamav-1.0.2+dfsg/debian/patches/libclamav-Sort-libclamav.map-and-libfreshclam.map.patch --- clamav-1.0.1+dfsg/debian/patches/libclamav-Sort-libclamav.map-and-libfreshclam.map.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/libclamav-Sort-libclamav.map-and-libfreshclam.map.patch 2023-08-19 16:44:39.000000000 +0000 @@ -1,4 +1,4 @@ -From 591c101430987a4e8d1a01e2b2daccf35526438c Mon Sep 17 00:00:00 2001 +From 590e38aa59c54f91e0c1cbe542260c7bfb5d8e50 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 2 Jan 2023 16:13:35 +0100 Subject: libclamav: Sort libclamav.map and libfreshclam.map diff -Nru clamav-1.0.1+dfsg/debian/patches/libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch clamav-1.0.2+dfsg/debian/patches/libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch --- clamav-1.0.1+dfsg/debian/patches/libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch 2023-08-19 16:44:39.000000000 +0000 @@ -0,0 +1,1132 @@ +From 1ed2932ff2ba201883c4ddeb1cfc954d04f7f57e Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej 
Siewior +Date: Sat, 18 Feb 2023 10:47:53 +0100 +Subject: libclamav: Use OpenSSL' BN instead tomfastmath. + +Use OpenSSL's big number/ multiprecision integer arithmetics +functionality to replace tomfastmath. + +This is a first shot at doing just this. Further improvement could be +use more RSA-signature verification from OpenSSL in crtmgr_rsa_verify() +and less self parsing. +_padding_check_PKCS1_type_1() has been borrowed from OpenSSL to make +further replacments easier. + +Patch-Name: libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch +Signed-off-by: Sebastian Andrzej Siewior +--- + libclamav/CMakeLists.txt | 3 +- + libclamav/asn1.c | 33 +++- + libclamav/bignum.h | 14 -- + libclamav/crtmgr.c | 405 ++++++++++++++++++++++++++------------- + libclamav/crtmgr.h | 14 +- + libclamav/dsig.c | 175 ++++++++++++----- + libclamav/dsig.h | 2 +- + libclamav/readdb.c | 24 ++- + libclamav/textnorm.c | 1 - + libclamav/xdp.c | 1 - + 10 files changed, 443 insertions(+), 229 deletions(-) + delete mode 100644 libclamav/bignum.h + +diff --git a/libclamav/CMakeLists.txt b/libclamav/CMakeLists.txt +index f0b3fdf..5eb2e95 100644 +--- a/libclamav/CMakeLists.txt ++++ b/libclamav/CMakeLists.txt +@@ -250,8 +250,7 @@ target_sources( tomsfastmath + tomsfastmath/sqr/fp_sqr_comba_generic.c + tomsfastmath/sqr/fp_sqr_comba_small_set.c + tomsfastmath/sqr/fp_sqrmod.c +- PUBLIC +- bignum.h ) ++ ) + target_include_directories( tomsfastmath + PRIVATE + ${CMAKE_BINARY_DIR} +diff --git a/libclamav/asn1.c b/libclamav/asn1.c +index 1eec3b0..880dbc5 100644 +--- a/libclamav/asn1.c ++++ b/libclamav/asn1.c +@@ -24,10 +24,10 @@ + #endif + + #include ++#include + + #include "clamav.h" + #include "asn1.h" +-#include "bignum.h" + #include "matcher-hash.h" + + /* --------------------------------------------------------------------------- OIDS */ +@@ -695,7 +695,8 @@ static int asn1_get_rsa_pubkey(fmap_t *map, const void **asn1data, unsigned int + return 1; + } + +- fp_read_unsigned_bin(&x509->n, obj.content, 
avail2); ++ if (!BN_bin2bn(obj.content, avail2, x509->n)) ++ return 1; + + if (asn1_expect_objtype(map, obj.next, &avail, &obj, ASN1_TYPE_INTEGER)) /* INTEGER - exp */ + return 1; +@@ -712,7 +713,8 @@ static int asn1_get_rsa_pubkey(fmap_t *map, const void **asn1data, unsigned int + return 1; + } + +- fp_read_unsigned_bin(&x509->e, obj.content, obj.size); ++ if (!BN_bin2bn(obj.content, obj.size, x509->e)) ++ return 1; + + return 0; + } +@@ -738,9 +740,12 @@ static int asn1_get_x509(fmap_t *map, const void **asn1data, unsigned int *size, + int ret = ASN1_GET_X509_UNRECOVERABLE_ERROR; + unsigned int version; + +- cli_crt_init(&x509); +- + do { ++ if (cli_crt_init(&x509) < 0) { ++ cli_dbgmsg("asn1_get_x509: failed to initialize x509.\n"); ++ break; ++ } ++ + if (asn1_expect_objtype(map, *asn1data, size, &crt, ASN1_TYPE_SEQUENCE)) { /* SEQUENCE */ + cli_dbgmsg("asn1_get_x509: expected SEQUENCE at the x509 start\n"); + break; +@@ -1107,7 +1112,8 @@ static int asn1_get_x509(fmap_t *map, const void **asn1data, unsigned int *size, + break; + } + +- fp_read_unsigned_bin(&x509.sig, obj.content, obj.size); ++ if (!BN_bin2bn(obj.content, obj.size, x509.sig)) ++ break; + + if (crt.size) { + cli_dbgmsg("asn1_get_x509: found unexpected extra data in signature\n"); +@@ -1404,6 +1410,8 @@ static cl_error_t asn1_parse_mscat(struct cl_engine *engine, fmap_t *map, size_t + void *hash_ctx; + int result; + cl_error_t ret = CL_EPARSE; ++ char *mod = NULL; ++ char *exp = NULL; + + cli_dbgmsg("in asn1_parse_mscat\n"); + +@@ -1558,11 +1566,10 @@ static cl_error_t asn1_parse_mscat(struct cl_engine *engine, fmap_t *map, size_t + while (x509) { + char raw_issuer[CRT_RAWMAXLEN * 2 + 1], raw_subject[CRT_RAWMAXLEN * 2 + 1], raw_serial[CRT_RAWMAXLEN * 3 + 1]; + char issuer[SHA1_HASH_SIZE * 2 + 1], subject[SHA1_HASH_SIZE * 2 + 1], serial[SHA1_HASH_SIZE * 2 + 1]; +- char mod[1024 + 1], exp[1024 + 1]; +- int j = 1024; ++ int j; + +- fp_toradix_n(&x509->n, mod, 16, j + 1); +- fp_toradix_n(&x509->e, 
exp, 16, j + 1); ++ mod = BN_bn2hex(x509->n); ++ exp = BN_bn2hex(x509->e); + memset(raw_issuer, 0, CRT_RAWMAXLEN * 2 + 1); + memset(raw_subject, 0, CRT_RAWMAXLEN * 2 + 1); + memset(raw_serial, 0, CRT_RAWMAXLEN * 2 + 1); +@@ -1594,6 +1601,10 @@ static cl_error_t asn1_parse_mscat(struct cl_engine *engine, fmap_t *map, size_t + cli_dbgmsg(" raw_issuer: %s\n", raw_issuer); + + x509 = x509->next; ++ OPENSSL_free(mod); ++ OPENSSL_free(exp); ++ mod = NULL; ++ exp = NULL; + } + x509 = newcerts.crts; + } +@@ -2149,6 +2160,8 @@ static cl_error_t asn1_parse_mscat(struct cl_engine *engine, fmap_t *map, size_t + } while (0); + + finish: ++ OPENSSL_free(mod); ++ OPENSSL_free(exp); + if (CL_EPARSE == ret) { + cli_dbgmsg("asn1_parse_mscat: failed to parse authenticode section\n"); + } +diff --git a/libclamav/bignum.h b/libclamav/bignum.h +deleted file mode 100644 +index a1c6d6e..0000000 +--- a/libclamav/bignum.h ++++ /dev/null +@@ -1,14 +0,0 @@ +-#ifndef BIGNUM_H_ +-#define BIGNUM_H_ +- +-#if HAVE_CONFIG_H +-#include "clamav-config.h" +-#endif +- +-#if HAVE_SYSTEM_TOMSFASTMATH +-#include +-#else +-#include "tomsfastmath/headers/tfm.h" +-#endif +- +-#endif +diff --git a/libclamav/crtmgr.c b/libclamav/crtmgr.c +index 571b1a7..3943e14 100644 +--- a/libclamav/crtmgr.c ++++ b/libclamav/crtmgr.c +@@ -42,20 +42,39 @@ + #define OID_2_16_840_1_101_3_4_2_3 "\x60\x86\x48\x01\x65\x03\x04\x02\x03" + #define OID_sha512 OID_2_16_840_1_101_3_4_2_3 + +-#define FP_INIT_MULTI(a, b, c) (fp_init(a), fp_init(b), fp_init(c)) +-#define FP_CLEAR_MULTI(...) 
++static int cli_crt_init_fps(cli_crt *x509) ++{ ++ x509->n = BN_new(); ++ x509->e = BN_new(); ++ x509->sig = BN_new(); + +-void cli_crt_init(cli_crt *x509) ++ if (!x509->n || !x509->e || !x509->sig) { ++ BN_free(x509->n); ++ BN_free(x509->e); ++ BN_free(x509->sig); ++ ++ x509->n = NULL; ++ x509->e = NULL; ++ x509->sig = NULL; ++ return -1; ++ } ++ return 0; ++} ++ ++int cli_crt_init(cli_crt *x509) + { + memset(x509, 0, sizeof(*x509)); +- +- // FP_INIT_MULTI is a memset for each and cannot fail. +- FP_INIT_MULTI(&x509->n, &x509->e, &x509->sig); ++ return cli_crt_init_fps(x509); + } + + void cli_crt_clear(cli_crt *x509) + { +- FP_CLEAR_MULTI(&x509->n, &x509->e, &x509->sig); ++ BN_free(x509->n); ++ BN_free(x509->e); ++ BN_free(x509->sig); ++ x509->n = NULL; ++ x509->e = NULL; ++ x509->sig = NULL; + } + + /* Look for an existing certificate in the trust store `m`. This search allows +@@ -118,7 +137,7 @@ cli_crt *crtmgr_trust_list_lookup(crtmgr *m, cli_crt *x509, int crb_crts_only) + if (x509->hashtype != i->hashtype || + memcmp(x509->issuer, i->issuer, sizeof(i->issuer)) || + x509->ignore_serial != i->ignore_serial || +- fp_cmp(&x509->e, &i->e)) { ++ BN_cmp(x509->e, i->e)) { + continue; + } + } +@@ -135,7 +154,7 @@ cli_crt *crtmgr_trust_list_lookup(crtmgr *m, cli_crt *x509, int crb_crts_only) + (i->codeSign | x509->codeSign) == i->codeSign && + (i->timeSign | x509->timeSign) == i->timeSign && + !memcmp(x509->subject, i->subject, sizeof(i->subject)) && +- !fp_cmp(&x509->n, &i->n)) { ++ !BN_cmp(x509->n, i->n)) { + return i; + } + } +@@ -166,7 +185,7 @@ cli_crt *crtmgr_block_list_lookup(crtmgr *m, cli_crt *x509) + + if (!i->isBlocked || + memcmp(i->subject, x509->subject, sizeof(i->subject)) || +- fp_cmp(&x509->n, &i->n)) { ++ BN_cmp(x509->n, i->n)) { + continue; + } + +@@ -191,37 +210,51 @@ cli_crt *crtmgr_lookup(crtmgr *m, cli_crt *x509) + } + } + +-int crtmgr_add(crtmgr *m, cli_crt *x509) ++bool crtmgr_add(crtmgr *m, cli_crt *x509) + { +- cli_crt *i; ++ bool failed = 
true; ++ cli_crt *i = NULL; + + if (x509->isBlocked) { + if (crtmgr_block_list_lookup(m, x509)) { + cli_dbgmsg("crtmgr_add: duplicate blocked certificate detected - not adding\n"); +- return 0; ++ failed = false; ++ goto done; + } + } else { + if (crtmgr_trust_list_lookup(m, x509, 0)) { + cli_dbgmsg("crtmgr_add: duplicate trusted certificate detected - not adding\n"); +- return 0; ++ failed = false; ++ goto done; + } + } + + i = cli_malloc(sizeof(*i)); +- if (!i) +- return 1; ++ if (i == NULL) { ++ goto done; ++ } + +- // FP_INIT_MULTI is a memset for each and cannot fail. +- FP_INIT_MULTI(&i->n, &i->e, &i->sig); ++ if (cli_crt_init_fps(i) < 0) { ++ goto done; ++ } + +- fp_copy(&x509->n, &i->n); +- fp_copy(&x509->e, &i->e); +- fp_copy(&x509->sig, &i->sig); ++ if (!BN_copy(i->n, x509->n)) { ++ goto done; ++ } ++ if (!BN_copy(i->e, x509->e)) { ++ goto done; ++ } ++ if (!BN_copy(i->sig, x509->sig)) { ++ goto done; ++ } + +- if ((x509->name)) ++ if (x509->name) { + i->name = strdup(x509->name); +- else ++ if (!i->name) ++ goto done; ++ } else { + i->name = NULL; ++ } + + memcpy(i->raw_subject, x509->raw_subject, sizeof(i->raw_subject)); + memcpy(i->raw_issuer, x509->raw_issuer, sizeof(i->raw_issuer)); +@@ -240,12 +273,23 @@ int crtmgr_add(crtmgr *m, cli_crt *x509) + i->isBlocked = x509->isBlocked; + i->next = m->crts; + i->prev = NULL; +- if (m->crts) ++ if (m->crts) { + m->crts->prev = i; ++ } + m->crts = i; + + m->items++; +- return 0; ++ ++ failed = false; ++ i = NULL; ++ ++done: ++ if (i != NULL) { ++ cli_crt_clear(i); ++ free(i); ++ } ++ ++ return failed; + } + + void crtmgr_init(crtmgr *m) +@@ -281,12 +325,133 @@ void crtmgr_free(crtmgr *m) + crtmgr_del(m, m->crts); + } + +-static int crtmgr_rsa_verify(cli_crt *x509, fp_int *sig, cli_crt_hashtype hashtype, const uint8_t *refhash) ++static cl_error_t _padding_check_PKCS1_type_1(uint8_t **to, int *tlen, ++ uint8_t *from, unsigned int flen, ++ unsigned int num) + { +- int keylen = fp_unsigned_bin_size(&x509->n), 
siglen = fp_unsigned_bin_size(sig); +- int ret, j, objlen, hashlen; +- uint8_t d[513]; +- fp_int x; ++ int i, j; ++ unsigned char *p; ++ ++ p = from; ++ ++ /* ++ * The format is ++ * 00 || 01 || PS || 00 || D ++ * PS - padding string, at least 8 bytes of FF ++ * D - data. ++ */ ++ ++ if (num < 11) /* RSA_PKCS1_PADDING_SIZE */ ++ return CL_EPARSE; ++ ++ /* Accept inputs with and without the leading 0-byte. */ ++ if (num == flen) { ++ if ((*p++) != 0x00) { ++ cli_dbgmsg("%s: Bad padding\n", __func__); ++ return CL_EPARSE; ++ } ++ flen--; ++ } ++ ++ if ((num != (flen + 1)) || (*(p++) != 0x01)) { ++ cli_dbgmsg("%s: Bad block type\n", __func__); ++ return CL_EPARSE; ++ } ++ ++ /* scan over padding data */ ++ j = flen - 1; /* one for type. */ ++ for (i = 0; i < j; i++) { ++ if (*p != 0xff) { /* should decrypt to 0xff */ ++ if (*p == 0) { ++ p++; ++ break; ++ } else { ++ cli_dbgmsg("%s: Bad header\n", __func__); ++ return CL_EPARSE; ++ } ++ } ++ p++; ++ } ++ ++ if (i == j) { ++ cli_dbgmsg("%s: Bad header\n", __func__); ++ return CL_EPARSE; ++ } ++ ++ if (i < 8) { ++ cli_dbgmsg("%s: Bad padding\n", __func__); ++ return CL_EPARSE; ++ } ++ i++; /* Skip over the '\0' */ ++ j -= i; ++ *tlen = j; ++ *to = p; ++ ++ return CL_SUCCESS; ++} ++ ++static cl_error_t crtmgr_get_recov_data(BIGNUM *sig, cli_crt *x509, ++ uint8_t **buffer, uint8_t **payload, ++ int *payload_len) ++{ ++ BN_CTX *bnctx; ++ int pad_size; ++ int keylen; ++ uint8_t *d; ++ BIGNUM *x; ++ cl_error_t ret; ++ ++ *buffer = NULL; ++ *payload = NULL; ++ *payload_len = 0; ++ ret = CL_ERROR; ++ ++ keylen = BN_num_bytes(x509->n); ++ bnctx = BN_CTX_new(); ++ if (!bnctx) ++ goto done; ++ ++ x = BN_new(); ++ if (!x) ++ goto done; ++ ++ MALLOC(d, keylen); ++ ++ if (!BN_mod_exp(x, sig, x509->e, x509->n, bnctx)) { ++ cli_warnmsg("crtmgr_rsa_verify: verification failed: BN_mod_exp failed.\n"); ++ goto done; ++ } ++ ++ pad_size = BN_bn2bin(x, d); ++ if (pad_size < 0) { ++ cli_dbgmsg("crtmgr_rsa_verify: buffer too small.\n"); ++ 
goto done; ++ } ++ ++ ret = _padding_check_PKCS1_type_1(payload, payload_len, d, pad_size, keylen); ++ if (ret != CL_SUCCESS) { ++ cli_dbgmsg("crtmgr_rsa_verify: RSA_padding_check_PKCS1_type_1() failed\n"); ++ goto done; ++ } ++ *buffer = d; ++ d = NULL; ++ ret = CL_SUCCESS; ++ ++done: ++ BN_CTX_free(bnctx); ++ BN_free(x); ++ free(d); ++ return ret; ++} ++ ++static int crtmgr_rsa_verify(cli_crt *x509, BIGNUM *sig, cli_crt_hashtype hashtype, const uint8_t *refhash) ++{ ++ int keylen = BN_num_bytes(x509->n), siglen = BN_num_bytes(sig); ++ int j, objlen, hashlen; ++ uint8_t *d; ++ uint8_t *buff; ++ int len; ++ cl_error_t ret; + + if (hashtype == CLI_SHA1RSA) { + hashlen = SHA1_HASH_SIZE; +@@ -303,132 +468,100 @@ static int crtmgr_rsa_verify(cli_crt *x509, fp_int *sig, cli_crt_hashtype hashty + return 1; + } + +- fp_init(&x); ++ if (MAX(keylen, siglen) - MIN(keylen, siglen) > 1) { ++ cli_dbgmsg("crtmgr_rsa_verify: keylen and siglen differ by more than one\n"); ++ return 1; ++ } ++ ++ ret = crtmgr_get_recov_data(sig, x509, &buff, &d, &len); ++ if (ret != CL_SUCCESS) ++ return 1; + + do { +- if (MAX(keylen, siglen) - MIN(keylen, siglen) > 1) { +- cli_dbgmsg("crtmgr_rsa_verify: keylen and siglen differ by more than one\n"); ++ j = 0; ++ ++ if (len <= hashlen) { ++ cli_dbgmsg("crtmgr_rsa_verify: encountered len less than hashlen\n"); + break; + } +- if ((ret = fp_exptmod(sig, &x509->e, &x509->n, &x))) { +- cli_warnmsg("crtmgr_rsa_verify: verification failed: fp_exptmod failed with %d\n", ret); ++ /* hash is asn1 der encoded */ ++ /* SEQ { SEQ { OID, NULL }, OCTET STRING */ ++ if (len < 2 || d[j] != 0x30 || d[j + 1] != len - 2) { ++ cli_dbgmsg("crtmgr_rsa_verify: unexpected hash to be ASN1 DER encoded.\n"); + break; + } +- if (fp_unsigned_bin_size(&x) != keylen - 1) { +- cli_dbgmsg("crtmgr_rsa_verify: keylen-1 doesn't match expected size of exptmod result\n"); +- break; +- } +- if (((unsigned int)fp_unsigned_bin_size(&x)) > sizeof(d)) { +- cli_dbgmsg("crtmgr_rsa_verify: 
exptmod result would overrun working buffer\n"); ++ len -= 2; ++ j += 2; ++ ++ if (len < 2 || d[j] != 0x30) { ++ cli_dbgmsg("crtmgr_rsa_verify: expected SEQUENCE at beginning of cert AlgorithmIdentifier\n"); + break; + } + +- fp_to_unsigned_bin(&x, d); ++ objlen = d[j + 1]; + +- if (*d != 1) { /* block type 1 */ +- cli_dbgmsg("crtmgr_rsa_verify: expected block type 1 at d[0]\n"); ++ len -= 2; ++ j += 2; ++ if (len < objlen) { ++ cli_dbgmsg("crtmgr_rsa_verify: key length mismatch in ASN1 DER hash encoding\n"); + break; + } +- +- keylen -= 1; /* 0xff padding */ +- for (j = 1; j < keylen - 2; j++) +- if (d[j] != 0xff) +- break; +- if (j == keylen - 2) { +- cli_dbgmsg("crtmgr_rsa_verify: only encountered 0xFF padding parsing cert\n"); +- break; +- } +- if (d[j] != 0) { /* 0x00 separator */ +- cli_dbgmsg("crtmgr_rsa_verify: expected 0x00 separator\n"); +- break; +- } +- +- j++; +- keylen -= j; /* asn1 size */ +- +- if (keylen < hashlen) { +- cli_dbgmsg("crtmgr_rsa_verify: encountered keylen less than hashlen\n"); +- break; +- } +- if (keylen > hashlen) { +- /* hash is asn1 der encoded */ +- /* SEQ { SEQ { OID, NULL }, OCTET STRING */ +- if (keylen < 2 || d[j] != 0x30 || d[j + 1] + 2 != keylen) { +- cli_dbgmsg("crtmgr_rsa_verify: unexpected hash to be ASN1 DER encoded\n"); ++ if (objlen == 9) { ++ // Check for OID type indicating a length of 5, OID_sha1, and the NULL type/value ++ if (hashtype != CLI_SHA1RSA || memcmp(&d[j], "\x06\x05" OID_sha1 "\x05\x00", 9)) { ++ cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); + break; + } +- keylen -= 2; +- j += 2; +- +- if (keylen < 2 || d[j] != 0x30) { +- cli_dbgmsg("crtmgr_rsa_verify: expected SEQUENCE at beginning of cert AlgorithmIdentifier\n"); ++ } else if (objlen == 12) { ++ // Check for OID type indicating a length of 8, OID_md5, and the NULL type/value ++ if (hashtype != CLI_MD5RSA || memcmp(&d[j], "\x06\x08" OID_md5 "\x05\x00", 12)) { ++ cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); + 
break; + } +- +- objlen = d[j + 1]; +- +- keylen -= 2; +- j += 2; +- if (keylen < objlen) { +- cli_dbgmsg("crtmgr_rsa_verify: key length mismatch in ASN1 DER hash encoding\n"); +- break; +- } +- if (objlen == 9) { +- // Check for OID type indicating a length of 5, OID_sha1, and the NULL type/value +- if (hashtype != CLI_SHA1RSA || memcmp(&d[j], "\x06\x05" OID_sha1 "\x05\x00", 9)) { +- cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); ++ } else if (objlen == 13) { ++ if (hashtype == CLI_SHA256RSA) { ++ // Check for OID type indicating a length of 9, OID_sha256, and the NULL type/value ++ if (0 != memcmp(&d[j], "\x06\x09" OID_sha256 "\x05\x00", 13)) { ++ cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA256 hash\n"); + break; + } +- } else if (objlen == 12) { +- // Check for OID type indicating a length of 8, OID_md5, and the NULL type/value +- if (hashtype != CLI_MD5RSA || memcmp(&d[j], "\x06\x08" OID_md5 "\x05\x00", 12)) { +- cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); ++ ++ } else if (hashtype == CLI_SHA384RSA) { ++ // Check for OID type indicating a length of 9, OID_sha384, and the NULL type/value ++ if (0 != memcmp(&d[j], "\x06\x09" OID_sha384 "\x05\x00", 13)) { ++ cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA384 hash\n"); + break; + } +- } else if (objlen == 13) { +- if (hashtype == CLI_SHA256RSA) { +- // Check for OID type indicating a length of 9, OID_sha256, and the NULL type/value +- if (0 != memcmp(&d[j], "\x06\x09" OID_sha256 "\x05\x00", 13)) { +- cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA256 hash\n"); +- break; +- } + +- } else if (hashtype == CLI_SHA384RSA) { +- // Check for OID type indicating a length of 9, OID_sha384, and the NULL type/value +- if (0 != memcmp(&d[j], "\x06\x09" OID_sha384 "\x05\x00", 13)) { +- cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA384 hash\n"); +- break; +- } +- +- } else if (hashtype == 
CLI_SHA512RSA) { +- // Check for OID type indicating a length of 9, OID_sha512, and the NULL type/value +- if (0 != memcmp(&d[j], "\x06\x09" OID_sha512 "\x05\x00", 13)) { +- cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA512 hash\n"); +- break; +- } +- +- } else { +- cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); ++ } else if (hashtype == CLI_SHA512RSA) { ++ // Check for OID type indicating a length of 9, OID_sha512, and the NULL type/value ++ if (0 != memcmp(&d[j], "\x06\x09" OID_sha512 "\x05\x00", 13)) { ++ cli_dbgmsg("crtmgr_rsa_verify: invalid AlgorithmIdentifier block for SHA512 hash\n"); + break; + } ++ + } else { + cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); + break; + } +- +- keylen -= objlen; +- j += objlen; +- if (keylen < 2 || d[j] != 0x04 || d[j + 1] != hashlen) { +- cli_dbgmsg("crtmgr_rsa_verify: hash length mismatch in ASN1 DER hash encoding\n"); +- break; +- } +- keylen -= 2; +- j += 2; +- if (keylen != hashlen) { +- cli_dbgmsg("crtmgr_rsa_verify: extra data in the ASN1 DER hash encoding\n"); +- break; +- } ++ } else { ++ cli_errmsg("crtmgr_rsa_verify: FIXME ACAB - CRYPTO MISSING?\n"); ++ break; + } ++ ++ len -= objlen; ++ j += objlen; ++ if (len < 2 || d[j] != 0x04 || d[j + 1] != hashlen) { ++ cli_dbgmsg("crtmgr_rsa_verify: hash length mismatch in ASN1 DER hash encoding\n"); ++ break; ++ } ++ j += 2; ++ len -= 2; ++ if (len != hashlen) { ++ cli_dbgmsg("crtmgr_rsa_verify: extra data in the ASN1 DER hash encoding\n"); ++ break; ++ } ++ + if (memcmp(&d[j], refhash, hashlen)) { + // This is a common error case if we are using crtmgr_rsa_verify to + // determine whether we've found the right issuer certificate based +@@ -438,10 +571,12 @@ static int crtmgr_rsa_verify(cli_crt *x509, fp_int *sig, cli_crt_hashtype hashty + break; + } + ++ free(buff); + return 0; + + } while (0); + ++ free(buff); + return 1; + } + +@@ -469,7 +604,7 @@ cli_crt *crtmgr_verify_crt(crtmgr *m, cli_crt *x509) + if 
(i->certSign && + !i->isBlocked && + !memcmp(i->subject, x509->issuer, sizeof(i->subject)) && +- !crtmgr_rsa_verify(i, &x509->sig, x509->hashtype, x509->tbshash)) { ++ !crtmgr_rsa_verify(i, x509->sig, x509->hashtype, x509->tbshash)) { + int curscore; + if ((x509->codeSign & i->codeSign) == x509->codeSign && (x509->timeSign & i->timeSign) == x509->timeSign) + return i; +@@ -493,16 +628,18 @@ cli_crt *crtmgr_verify_crt(crtmgr *m, cli_crt *x509) + cli_crt *crtmgr_verify_pkcs7(crtmgr *m, const uint8_t *issuer, const uint8_t *serial, const void *signature, unsigned int signature_len, cli_crt_hashtype hashtype, const uint8_t *refhash, cli_vrfy_type vrfytype) + { + cli_crt *i; +- fp_int sig; ++ BIGNUM *sig; + + if (signature_len < 1024 / 8 || signature_len > 4096 / 8 + 1) { + cli_dbgmsg("crtmgr_verify_pkcs7: unsupported sig len: %u\n", signature_len); + return NULL; + } + +- fp_init(&sig); ++ sig = BN_new(); ++ if (!sig) ++ return NULL; + +- fp_read_unsigned_bin(&sig, signature, signature_len); ++ BN_bin2bn(signature, signature_len, sig); + + for (i = m->crts; i; i = i->next) { + if (vrfytype == VRFY_CODE && !i->codeSign) +@@ -511,13 +648,13 @@ cli_crt *crtmgr_verify_pkcs7(crtmgr *m, const uint8_t *issuer, const uint8_t *se + continue; + if (!memcmp(i->issuer, issuer, sizeof(i->issuer)) && + !memcmp(i->serial, serial, sizeof(i->serial))) { +- if (!crtmgr_rsa_verify(i, &sig, hashtype, refhash)) { ++ if (!crtmgr_rsa_verify(i, sig, hashtype, refhash)) { + break; + } + cli_dbgmsg("crtmgr_verify_pkcs7: found cert with matching issuer and serial but RSA verification failed\n"); + } + } +- ++ BN_free(sig); + return i; + } + +diff --git a/libclamav/crtmgr.h b/libclamav/crtmgr.h +index eafd820..c8009578 100644 +--- a/libclamav/crtmgr.h ++++ b/libclamav/crtmgr.h +@@ -23,8 +23,8 @@ + #define __CRTMGR_H + + #include +- +-#include "bignum.h" ++#include ++#include + + typedef enum { CLI_HASHTYPE_ANY, /* used by crts added from .CRB rules */ + CLI_SHA1RSA, +@@ -63,9 +63,9 @@ typedef 
struct cli_crt_t { + * so it must have at least enough space for the largest hash in + * cli_crt_hashtype */ + uint8_t tbshash[SHA512_HASH_SIZE]; +- fp_int n; +- fp_int e; +- fp_int sig; ++ BIGNUM *n; ++ BIGNUM *e; ++ BIGNUM *sig; + time_t not_before; + time_t not_after; + cli_crt_hashtype hashtype; +@@ -82,11 +82,11 @@ typedef struct { + unsigned int items; + } crtmgr; + +-void cli_crt_init(cli_crt *x509); ++int cli_crt_init(cli_crt *x509); + void cli_crt_clear(cli_crt *x509); + void crtmgr_init(crtmgr *m); + void crtmgr_free(crtmgr *m); +-int crtmgr_add(crtmgr *m, cli_crt *x509); ++bool crtmgr_add(crtmgr *m, cli_crt *x509); + cli_crt *crtmgr_lookup(crtmgr *m, cli_crt *x509); + cli_crt *crtmgr_block_list_lookup(crtmgr *m, cli_crt *x509); + cli_crt *crtmgr_trust_list_lookup(crtmgr *m, cli_crt *x509, int crb_crts_only); +diff --git a/libclamav/dsig.c b/libclamav/dsig.c +index c8825b2..59303f9 100644 +--- a/libclamav/dsig.c ++++ b/libclamav/dsig.c +@@ -30,12 +30,12 @@ + #include + #include + #include ++#include + + #include "clamav.h" + #include "others.h" + #include "dsig.h" + #include "str.h" +-#include "bignum.h" + + #ifndef _WIN32 + #include +@@ -81,37 +81,83 @@ static char cli_ndecode(unsigned char value) + return -1; + } + +-static unsigned char *cli_decodesig(const char *sig, unsigned int plen, fp_int e, fp_int n) ++static unsigned char *cli_decodesig(const char *sig, unsigned int plen, BIGNUM *e, BIGNUM *n) + { + int i, slen = strlen(sig), dec; +- unsigned char *plain; +- fp_int r, p, c; ++ unsigned char *plain = NULL, *ret_sig = NULL; ++ BIGNUM *r = NULL, *p = NULL, *c = NULL; ++ BN_CTX *bn_ctx; ++ unsigned int bn_bytes; ++ ; + +- fp_init(&r); +- fp_init(&c); ++ r = BN_new(); ++ if (!r) { ++ goto done; ++ } ++ ++ p = BN_new(); ++ if (!p) { ++ goto done; ++ } ++ ++ c = BN_new(); ++ if (!c) { ++ goto done; ++ } ++ ++ bn_ctx = BN_CTX_new(); ++ if (!bn_ctx) { ++ goto done; ++ } ++ ++ BN_zero(c); + for (i = 0; i < slen; i++) { + if ((dec = cli_ndecode(sig[i])) < 
0) { +- return NULL; ++ goto done; ++ } ++ if (!BN_set_word(r, dec)) { ++ goto done; ++ } ++ if (!BN_lshift(r, r, 6 * i)) { ++ goto done; + } +- fp_set(&r, dec); +- fp_mul_2d(&r, 6 * i, &r); +- fp_add(&r, &c, &c); +- } + +- plain = (unsigned char *)cli_calloc(plen + 1, sizeof(unsigned char)); ++ if (!BN_add(c, c, r)) { ++ goto done; ++ } ++ } ++ if (!BN_mod_exp(p, c, e, n, bn_ctx)) { ++ goto done; ++ } ++ bn_bytes = BN_num_bytes(p); ++ /* Sometimes the size of the resulting BN (128) is larger than the expected ++ * length (16). The result does not match in this case. Instead of ++ * allocating memory and filling it, we fail early. ++ */ ++ if (plen < bn_bytes) { ++ cli_errmsg("cli_decodesig: Resulting signature too large (%d vs %d).\n", ++ bn_bytes, plen); ++ goto done; ++ } ++ plain = cli_calloc(plen, sizeof(unsigned char)); + if (!plain) { + cli_errmsg("cli_decodesig: Can't allocate memory for 'plain'\n"); +- return NULL; ++ goto done; + } +- fp_init(&p); +- fp_exptmod(&c, &e, &n, &p); /* plain = cipher^e mod n */ +- fp_set(&c, 256); +- for (i = plen - 1; i >= 0; i--) { /* reverse */ +- fp_div(&p, &c, &p, &r); +- plain[i] = MP_GET(&r); ++ if (!BN_bn2bin(p, plain)) { ++ goto done; + } + +- return plain; ++ ret_sig = plain; ++ plain = NULL; ++ ++done: ++ BN_free(r); ++ BN_free(p); ++ BN_free(c); ++ BN_CTX_free(bn_ctx); ++ free(plain); ++ return ret_sig; + } + + char *cli_getdsig(const char *host, const char *user, const unsigned char *data, unsigned int datalen, unsigned short mode) +@@ -228,41 +274,55 @@ char *cli_getdsig(const char *host, const char *user, const unsigned char *data, + return strdup(pt); + } + +-int cli_versig(const char *md5, const char *dsig) ++cl_error_t cli_versig(const char *md5, const char *dsig) + { +- fp_int n, e; +- char *pt, *pt2; ++ BIGNUM *n = NULL, *e = NULL; ++ char *pt = NULL, *pt2 = NULL; ++ int ret; ++ ++ ret = CL_EMEM; ++ n = BN_new(); ++ if (!n) ++ goto done; ++ ++ e = BN_new(); ++ if (!e) ++ goto done; ++ ++ ret = CL_EVERIFY; 
++ if (!BN_dec2bn(&e, CLI_ESTR)) ++ goto done; ++ ++ if (!BN_dec2bn(&n, CLI_NSTR)) ++ goto done; + + if (strlen(md5) != 32 || !isalnum(md5[0])) { + /* someone is trying to fool us with empty/malformed MD5 ? */ + cli_errmsg("SECURITY WARNING: MD5 basic test failure.\n"); +- return CL_EVERIFY; ++ goto done; + } + +- fp_init(&n); +- fp_read_radix(&n, CLI_NSTR, 10); +- fp_init(&e); +- fp_read_radix(&e, CLI_ESTR, 10); +- +- if (!(pt = (char *)cli_decodesig(dsig, 16, e, n))) { +- return CL_EVERIFY; +- } ++ if (!(pt = (char *)cli_decodesig(dsig, 16, e, n))) ++ goto done; + + pt2 = cli_str2hex(pt, 16); +- free(pt); + + cli_dbgmsg("cli_versig: Decoded signature: %s\n", pt2); + + if (strncmp(md5, pt2, 32)) { + cli_dbgmsg("cli_versig: Signature doesn't match.\n"); +- free(pt2); +- return CL_EVERIFY; ++ goto done; + } + +- free(pt2); +- + cli_dbgmsg("cli_versig: Digital signature is correct.\n"); +- return CL_SUCCESS; ++ ret = CL_SUCCESS; ++ ++done: ++ free(pt); ++ free(pt2); ++ BN_free(n); ++ BN_free(e); ++ return ret; + } + + #define HASH_LEN 32 +@@ -275,21 +335,39 @@ int cli_versig2(const unsigned char *sha256, const char *dsig_str, const char *n + unsigned char mask[BLK_LEN], data[BLK_LEN], final[8 + 2 * HASH_LEN], c[4]; + unsigned int i, rounds; + void *ctx; +- fp_int n, e; ++ BIGNUM *n, *e; ++ int ret; + +- fp_init(&e); +- fp_read_radix(&e, e_str, 10); +- fp_init(&n); +- fp_read_radix(&n, n_str, 10); ++ n = BN_new(); ++ e = BN_new(); ++ ++ if (!n || !e) { ++ ret = CL_EMEM; ++ goto done; ++ } ++ ++ ret = CL_EVERIFY; ++ if (!BN_dec2bn(&e, e_str)) ++ goto done; ++ ++ if (!BN_dec2bn(&n, n_str)) ++ goto done; + + decoded = cli_decodesig(dsig_str, PAD_LEN, e, n); +- if (!decoded) +- return CL_EVERIFY; ++ if (!decoded) { ++ ret = CL_EVERIFY; ++ goto done; ++ } + + if (decoded[PAD_LEN - 1] != 0xbc) { + free(decoded); +- return CL_EVERIFY; ++ ret = CL_EVERIFY; + } ++ BN_free(n); ++ BN_free(e); ++ ++ n = NULL; ++ e = NULL; + + memcpy(mask, decoded, BLK_LEN); + memcpy(digest2, 
&decoded[BLK_LEN], HASH_LEN); +@@ -337,4 +415,9 @@ int cli_versig2(const unsigned char *sha256, const char *dsig_str, const char *n + cl_finish_hash(ctx, digest1); + + return memcmp(digest1, digest2, HASH_LEN) ? CL_EVERIFY : CL_SUCCESS; ++ ++done: ++ BN_free(n); ++ BN_free(e); ++ return ret; + } +diff --git a/libclamav/dsig.h b/libclamav/dsig.h +index cb11c65..36a3176 100644 +--- a/libclamav/dsig.h ++++ b/libclamav/dsig.h +@@ -29,7 +29,7 @@ + #include "clamav-config.h" + #endif + +-int cli_versig(const char *md5, const char *dsig); ++cl_error_t cli_versig(const char *md5, const char *dsig); + int cli_versig2(const unsigned char *sha256, const char *dsig_str, const char *n_str, const char *e_str); + + /** +diff --git a/libclamav/readdb.c b/libclamav/readdb.c +index b8e7d78..5b89970 100644 +--- a/libclamav/readdb.c ++++ b/libclamav/readdb.c +@@ -3310,9 +3310,7 @@ static int cli_loadcrt(FILE *fs, struct cl_engine *engine, struct cli_dbio *dbio + char *tokens[CRT_TOKENS + 1]; + size_t line = 0, tokens_count; + cli_crt ca; +- int ret = CL_SUCCESS; +- char *pubkey = NULL; +- const uint8_t exp[] = "\x01\x00\x01"; ++ int ret = CL_SUCCESS; + + if (!(engine->dconf->pe & PE_CONF_CERTS)) { + cli_dbgmsg("cli_loadcrt: Ignoring .crb sigs due to DCONF configuration\n"); +@@ -3324,7 +3322,10 @@ static int cli_loadcrt(FILE *fs, struct cl_engine *engine, struct cli_dbio *dbio + return ret; + } + +- cli_crt_init(&ca); ++ if (cli_crt_init(&ca) < 0) { ++ cli_dbgmsg("cli_loadcrt: No mem for CA init.\n"); ++ return CL_EMEM; ++ } + memset(ca.issuer, 0xca, sizeof(ca.issuer)); + + while (cli_dbgets(buffer, FILEBUFF, fs, dbio)) { +@@ -3402,16 +3403,17 @@ static int cli_loadcrt(FILE *fs, struct cl_engine *engine, struct cli_dbio *dbio + goto done; + } + +- pubkey = cli_hex2str(tokens[4]); +- if (!pubkey) { ++ if (BN_hex2bn(&ca.n, tokens[4]) == 0) { + cli_errmsg("cli_loadcrt: line %u: Cannot convert public key to binary string\n", (unsigned int)line); + ret = CL_EMALFDB; + goto done; + } + +- 
fp_read_unsigned_bin(&(ca.n), (const unsigned char *)pubkey, strlen(tokens[4]) / 2); +- +- fp_read_unsigned_bin(&(ca.e), exp, sizeof(exp) - 1); ++ /* Set the RSA exponent of 65537 */ ++ if (!BN_set_word(ca.e, 65537)) { ++ cli_errmsg("cli_loadcrt: Cannot set the exponent.\n"); ++ goto done; ++ } + + switch (tokens[6][0]) { + case '1': +@@ -3463,13 +3465,9 @@ static int cli_loadcrt(FILE *fs, struct cl_engine *engine, struct cli_dbio *dbio + + ca.hashtype = CLI_HASHTYPE_ANY; + crtmgr_add(&(engine->cmgr), &ca); +- +- FREE(pubkey); + } + + done: +- FREE(pubkey); +- + cli_dbgmsg("Number of certs: %d\n", engine->cmgr.items); + cli_crt_clear(&ca); + return ret; +diff --git a/libclamav/textnorm.c b/libclamav/textnorm.c +index 6c45530..90db3e6 100644 +--- a/libclamav/textnorm.c ++++ b/libclamav/textnorm.c +@@ -30,7 +30,6 @@ + #include + #include "clamav.h" + #include "textnorm.h" +-#include "bignum.h" + + int text_normalize_init(struct text_norm_state *state, unsigned char *out, size_t out_len) + { +diff --git a/libclamav/xdp.c b/libclamav/xdp.c +index f0e2fdd..97eeecf 100644 +--- a/libclamav/xdp.c ++++ b/libclamav/xdp.c +@@ -52,7 +52,6 @@ + #include "scanners.h" + #include "conv.h" + #include "xdp.h" +-#include "bignum.h" + #include "filetypes.h" + + static char *dump_xdp(cli_ctx *ctx, const char *start, size_t sz); diff -Nru clamav-1.0.1+dfsg/debian/patches/Remove-bundled-tomfastmath-library.patch clamav-1.0.2+dfsg/debian/patches/Remove-bundled-tomfastmath-library.patch --- clamav-1.0.1+dfsg/debian/patches/Remove-bundled-tomfastmath-library.patch 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/Remove-bundled-tomfastmath-library.patch 2023-08-19 16:44:39.000000000 +0000 @@ -0,0 +1,243 @@ +From de9cef7ab6e5a57247f9598340a0e64869429870 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Sat, 18 Feb 2023 10:45:50 +0100 +Subject: Remove bundled tomfastmath library. 
+ +Now that the tomfastmath library is no longer used, remove it from the +tree. + +Patch-Name: Remove-bundled-tomfastmath-library.patch +Signed-off-by: Sebastian Andrzej Siewior +--- + .github/workflows/clang-format.yml | 2 +- + README.md | 1 - + clamav-config.h.cmake.in | 3 - + libclamav/CMakeLists.txt | 104 ----------------------------- + libclamav/Doxyfile | 13 +--- + unit_tests/CMakeLists.txt | 3 - + 6 files changed, 2 insertions(+), 124 deletions(-) + +diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml +index b3cbab1..8ededc0 100644 +--- a/.github/workflows/clang-format.yml ++++ b/.github/workflows/clang-format.yml +@@ -22,7 +22,7 @@ name: clang-format + matrix: + path: + - check: "libclamav" +- exclude: "(iana_cctld|bytecode_api_|bytecode_hooks|rijndael|yara|inffixed|inflate|queue|tomsfastmath|nsis|7z|regex|c++|generated)" ++ exclude: "(iana_cctld|bytecode_api_|bytecode_hooks|rijndael|yara|inffixed|inflate|queue|nsis|7z|regex|c++|generated)" + - check: "libfreshclam" + exclude: "" + - check: "clamav-milter" +diff --git a/README.md b/README.md +index 548d672..68dec8c 100644 +--- a/README.md ++++ b/README.md +@@ -113,7 +113,6 @@ ClamAV contains a number of components that include code copied in part or in + whole from 3rd party projects and whose code is not owned by Cisco and which + are licensed differently than ClamAV. These include: + +-- tomsfastmath: public domain + - Yara: Apache 2.0 license + - Yara has since switched to the BSD 3-Clause License; + Our source is out-of-date and needs to be updated. +diff --git a/clamav-config.h.cmake.in b/clamav-config.h.cmake.in +index 5de4cbf..b21af87 100644 +--- a/clamav-config.h.cmake.in ++++ b/clamav-config.h.cmake.in +@@ -401,9 +401,6 @@ + /* Define if UNRAR is linked instead of loaded. */ + #cmakedefine UNRAR_LINKED 1 + +-/* Define if UNRAR is linked instead of loaded. 
*/ +-#cmakedefine HAVE_SYSTEM_TOMSFASTMATH 1 +- + /* "Full clamav library version number" */ + #define LIBCLAMAV_FULLVER "@LIBCLAMAV_VERSION@" + +diff --git a/libclamav/CMakeLists.txt b/libclamav/CMakeLists.txt +index 5eb2e95..82f4e0a 100644 +--- a/libclamav/CMakeLists.txt ++++ b/libclamav/CMakeLists.txt +@@ -23,15 +23,6 @@ endif() + + add_definitions(-DTHIS_IS_LIBCLAMAV) + +-# Enable overflow checks in TomsFastMath's fp_exptmod() function. +-add_definitions(-DTFM_CHECK) +- +-# Just enable ASM in in TomsFastMath's on x86-64 where we know it works. +-# on i686 we run out of registers with -fPIC, and on ia64 we miscompile. +-if(NOT CMAKE_COMPILER_IS_GNUCC OR NOT (CMAKE_SIZEOF_VOID_P EQUAL 8)) +- add_definitions(-DTFM_NO_ASM) +-endif() +- + # 3rd party libraries included in libclamav + add_library( regex OBJECT ) + target_sources( regex +@@ -166,99 +157,6 @@ target_link_libraries( yara + PCRE2::pcre2 + JSONC::jsonc ) + +-add_library( tomsfastmath OBJECT ) +-target_sources( tomsfastmath +- PRIVATE +- tomsfastmath/addsub/fp_add.c +- tomsfastmath/addsub/fp_add_d.c +- tomsfastmath/addsub/fp_addmod.c +- tomsfastmath/addsub/fp_cmp.c +- tomsfastmath/addsub/fp_cmp_d.c +- tomsfastmath/addsub/fp_cmp_mag.c +- tomsfastmath/addsub/fp_sub.c +- tomsfastmath/addsub/fp_sub_d.c +- tomsfastmath/addsub/fp_submod.c +- tomsfastmath/addsub/s_fp_add.c +- tomsfastmath/addsub/s_fp_sub.c +- tomsfastmath/bin/fp_radix_size.c +- tomsfastmath/bin/fp_read_radix.c +- tomsfastmath/bin/fp_read_signed_bin.c +- tomsfastmath/bin/fp_read_unsigned_bin.c +- tomsfastmath/bin/fp_reverse.c +- tomsfastmath/bin/fp_s_rmap.c +- tomsfastmath/bin/fp_signed_bin_size.c +- tomsfastmath/bin/fp_to_signed_bin.c +- tomsfastmath/bin/fp_to_unsigned_bin.c +- tomsfastmath/bin/fp_toradix.c +- tomsfastmath/bin/fp_toradix_n.c +- tomsfastmath/bin/fp_unsigned_bin_size.c +- tomsfastmath/bit/fp_cnt_lsb.c +- tomsfastmath/bit/fp_count_bits.c +- tomsfastmath/bit/fp_div_2.c +- tomsfastmath/bit/fp_div_2d.c +- tomsfastmath/bit/fp_lshd.c +- 
tomsfastmath/bit/fp_mod_2d.c +- tomsfastmath/bit/fp_rshd.c +- tomsfastmath/divide/fp_div.c +- tomsfastmath/divide/fp_div_d.c +- tomsfastmath/divide/fp_mod.c +- tomsfastmath/divide/fp_mod_d.c +- tomsfastmath/exptmod/fp_2expt.c +- tomsfastmath/exptmod/fp_exptmod.c +- tomsfastmath/misc/fp_ident.c +- tomsfastmath/misc/fp_set.c +- tomsfastmath/mont/fp_montgomery_calc_normalization.c +- tomsfastmath/mont/fp_montgomery_reduce.c +- tomsfastmath/mont/fp_montgomery_setup.c +- tomsfastmath/mul/fp_mul.c +- tomsfastmath/mul/fp_mul_comba.c +- tomsfastmath/mul/fp_mul_2.c +- tomsfastmath/mul/fp_mul_2d.c +- tomsfastmath/mul/fp_mul_comba_12.c +- tomsfastmath/mul/fp_mul_comba_17.c +- tomsfastmath/mul/fp_mul_comba_20.c +- tomsfastmath/mul/fp_mul_comba_24.c +- tomsfastmath/mul/fp_mul_comba_28.c +- tomsfastmath/mul/fp_mul_comba_3.c +- tomsfastmath/mul/fp_mul_comba_32.c +- tomsfastmath/mul/fp_mul_comba_4.c +- tomsfastmath/mul/fp_mul_comba_48.c +- tomsfastmath/mul/fp_mul_comba_6.c +- tomsfastmath/mul/fp_mul_comba_64.c +- tomsfastmath/mul/fp_mul_comba_7.c +- tomsfastmath/mul/fp_mul_comba_8.c +- tomsfastmath/mul/fp_mul_comba_9.c +- tomsfastmath/mul/fp_mul_comba_small_set.c +- tomsfastmath/mul/fp_mul_d.c +- tomsfastmath/mul/fp_mulmod.c +- tomsfastmath/numtheory/fp_invmod.c +- tomsfastmath/sqr/fp_sqr.c +- tomsfastmath/sqr/fp_sqr_comba_12.c +- tomsfastmath/sqr/fp_sqr_comba_17.c +- tomsfastmath/sqr/fp_sqr_comba_20.c +- tomsfastmath/sqr/fp_sqr_comba_24.c +- tomsfastmath/sqr/fp_sqr_comba_28.c +- tomsfastmath/sqr/fp_sqr_comba_3.c +- tomsfastmath/sqr/fp_sqr_comba_32.c +- tomsfastmath/sqr/fp_sqr_comba_4.c +- tomsfastmath/sqr/fp_sqr_comba_48.c +- tomsfastmath/sqr/fp_sqr_comba_6.c +- tomsfastmath/sqr/fp_sqr_comba_64.c +- tomsfastmath/sqr/fp_sqr_comba_7.c +- tomsfastmath/sqr/fp_sqr_comba_8.c +- tomsfastmath/sqr/fp_sqr_comba_9.c +- tomsfastmath/sqr/fp_sqr_comba_generic.c +- tomsfastmath/sqr/fp_sqr_comba_small_set.c +- tomsfastmath/sqr/fp_sqrmod.c +- ) +-target_include_directories( tomsfastmath +- 
PRIVATE +- ${CMAKE_BINARY_DIR} +- ${CMAKE_CURRENT_SOURCE_DIR}/tomsfastmath/headers +- PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ) +-set_target_properties( tomsfastmath PROPERTIES +- COMPILE_FLAGS "${WARNCFLAGS}" ) +- + # Bytecode Runtime + add_library( bytecode_runtime OBJECT ) + if(LLVM_FOUND) +@@ -525,7 +423,6 @@ if(ENABLE_SHARED_LIB) + regex + lzma_sdk + yara +- tomsfastmath + bytecode_runtime + ${LIBMSPACK} + ClamAV::libclamav_rust +@@ -637,7 +534,6 @@ if(ENABLE_STATIC_LIB) + regex + lzma_sdk + yara +- tomsfastmath + bytecode_runtime + ${LIBMSPACK} + ClamAV::libclamav_rust +diff --git a/libclamav/Doxyfile b/libclamav/Doxyfile +index a83cf22..a2593ea 100644 +--- a/libclamav/Doxyfile ++++ b/libclamav/Doxyfile +@@ -111,15 +111,4 @@ INPUT = . \ + jsparse \ + jsparse/generated \ + nsis \ +- regex \ +- tomsfastmath \ +- tomsfastmath/addsub \ +- tomsfastmath/bin \ +- tomsfastmath/bit \ +- tomsfastmath/divide \ +- tomsfastmath/exptmod \ +- tomsfastmath/misc \ +- tomsfastmath/mont \ +- tomsfastmath/mul \ +- tomsfastmath/numtheory \ +- tomsfastmath/sqr ++ regex +diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt +index 567e95e..0b3d565 100644 +--- a/unit_tests/CMakeLists.txt ++++ b/unit_tests/CMakeLists.txt +@@ -49,7 +49,6 @@ if(ENABLE_APP) + PRIVATE + ClamAV::libclamav + libcheck::check +- tomsfastmath + JSONC::jsonc + ${LIBMSPACK} + OpenSSL::SSL +@@ -85,7 +84,6 @@ if(ENABLE_APP) + ClamAV::libclamav + ClamAV::common + libcheck::check +- tomsfastmath + JSONC::jsonc + ${LIBMSPACK} + OpenSSL::SSL +@@ -133,7 +131,6 @@ target_link_libraries(check_clamav + PRIVATE + ClamAV::libclamav + libcheck::check +- tomsfastmath + JSONC::jsonc + ${LIBMSPACK} + OpenSSL::SSL diff -Nru clamav-1.0.1+dfsg/debian/patches/resolve-armhf-ftbfs.patch clamav-1.0.2+dfsg/debian/patches/resolve-armhf-ftbfs.patch --- clamav-1.0.1+dfsg/debian/patches/resolve-armhf-ftbfs.patch 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/resolve-armhf-ftbfs.patch 2023-08-23 
11:18:43.000000000 +0000 @@ -0,0 +1,40 @@ +Description: resolve armhf failure to build from source. +Author: Vladimir Petko + +--- a/libclamav/special.c ++++ b/libclamav/special.c +@@ -48,7 +48,8 @@ + + int cli_check_mydoom_log(cli_ctx *ctx) + { +- const uint32_t *record; ++ const uint32_t record[16]; ++ const uint32_t mask = 0xffffffff; + uint32_t check, key; + fmap_t *map = ctx->fmap; + unsigned int blocks = map->len / (8 * 4); +@@ -59,14 +60,20 @@ + if (blocks > 5) + blocks = 5; + +- record = fmap_need_off_once(map, 0, 8 * 4 * blocks); +- if (!record) ++ // returns unaligned memory block ++ const char* data = fmap_need_off_once(map, 0, 8 * 4 * blocks); ++ if (!data) + return CL_CLEAN; ++ + while (blocks) { /* This wasn't probably intended but that's what the current code does anyway */ +- if (record[--blocks] == 0xffffffff) ++ unsigned int offset = --blocks; ++ offset *=sizeof(uint32_t); ++ // safe (but slow) on unaligned memory ++ if (!memcmp(&data[offset], &mask, sizeof(uint32_t))) + return CL_CLEAN; + } +- ++ // copy into aligned array to perform bit operations ++ memcpy(record, data, sizeof(record)); + key = ~be32_to_host(record[0]); + check = (be32_to_host(record[1]) ^ key) + + (be32_to_host(record[2]) ^ key) + diff -Nru clamav-1.0.1+dfsg/debian/patches/series clamav-1.0.2+dfsg/debian/patches/series --- clamav-1.0.1+dfsg/debian/patches/series 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/series 2023-08-23 11:16:37.000000000 +0000 @@ -1,9 +1,10 @@ Change-paths-in-sample-conf-file-to-match-Debian.patch -clamd_dont_depend_on_clamav_demon_socket.patch -Use-either-system-s-tomfastmath-library-or-the-built.patch cargo-Remove-windows-referenfes.patch libclamav-Sort-libclamav.map-and-libfreshclam.map.patch libclamav-Add-missing-symbols.patch Add-a-version-script-for-libclamav-and-libfreshclam.patch libclamav-pe-Use-endian-wrapper-in-more-places.patch Add-an-option-to-avoid-setting-RPATH-on-unix-systems.patch 
+libclamav-Use-OpenSSL-BN-instead-tomfastmath.patch +Remove-bundled-tomfastmath-library.patch +resolve-armhf-ftbfs.patch diff -Nru clamav-1.0.1+dfsg/debian/patches/Use-either-system-s-tomfastmath-library-or-the-built.patch clamav-1.0.2+dfsg/debian/patches/Use-either-system-s-tomfastmath-library-or-the-built.patch --- clamav-1.0.1+dfsg/debian/patches/Use-either-system-s-tomfastmath-library-or-the-built.patch 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/patches/Use-either-system-s-tomfastmath-library-or-the-built.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,231 +0,0 @@ -From c9cdab0a6a11d86f7f39973b86f9752fc2000f54 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 30 Dec 2022 19:06:28 +0100 -Subject: Use either system's tomfastmath library or the built-in one. - -Patch-Name: Use-either-system-s-tomfastmath-library-or-the-built.patch -Signed-off-by: Sebastian Andrzej Siewior ---- - CMakeLists.txt | 15 +++++++ - CMakeOptions.cmake | 3 ++ - cmake/FindTOMSFASTMATH.cmake | 85 ++++++++++++++++++++++++++++++++++++ - libclamav/CMakeLists.txt | 7 ++- - unit_tests/CMakeLists.txt | 6 +-- - 5 files changed, 111 insertions(+), 5 deletions(-) - create mode 100644 cmake/FindTOMSFASTMATH.cmake - -diff --git a/CMakeLists.txt b/CMakeLists.txt -index edaa6b4..a146bc1 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -460,6 +460,11 @@ if(ZLIB_FOUND) - set(HAVE_LIBZ 1) - endif() - -+find_package(TOMSFASTMATH REQUIRED) -+if(TOMSFASTMATH_FOUND) -+ set(HAVE_TFM_H 1) -+endif() -+ - find_package(BZip2 REQUIRED) - if(BZIP2_FOUND) - set(HAVE_BZLIB_H 1) -@@ -1205,6 +1210,16 @@ ${_} ${e}${OPENSSL_LIBRARIES} - ${b} JSON support: ${e} - ${_} json-c ${e}${JSONC_INCLUDE_DIRS} - ${_} ${e}${JSONC_LIBRARIES} -+${b} Math support: ${e}") -+if(TOMFASTMATH_BUILTIN) -+message("\ -+${_} tomsfastmath ${e}built-in") -+else() -+message("\ -+${_} tomsfastmath ${e}${TOMSFASTMATH_INCLUDE_DIR} -+${_} ${e}${TOMSFASTMATH_LIBRARY}") -+endif() -+message("\ - ${b} Threading 
support: ${e}") - if(WIN32) - message("\ -diff --git a/CMakeOptions.cmake b/CMakeOptions.cmake -index d995bac..2b377cd 100644 ---- a/CMakeOptions.cmake -+++ b/CMakeOptions.cmake -@@ -116,6 +116,9 @@ option(ENABLE_SYSTEMD - "Install systemd service files if systemd is found." - ${ENABLE_SYSTEMD_DEFAULT}) - -+option(ENABLE_EXTERNAL_TOMFASTMATH -+ "Use system's tomfastmath instead of internal bundled version.") -+ - # For reference determining target platform: - # Rust Targets: https://doc.rust-lang.org/nightly/rustc/platform-support.html - option(RUST_COMPILER_TARGET -diff --git a/cmake/FindTOMSFASTMATH.cmake b/cmake/FindTOMSFASTMATH.cmake -new file mode 100644 -index 0000000..abe1b10 ---- /dev/null -+++ b/cmake/FindTOMSFASTMATH.cmake -@@ -0,0 +1,85 @@ -+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -+# file Copyright.txt or https://cmake.org/licensing for details. -+ -+#[=======================================================================[.rst: -+FindTOMSFASTMATH -+------- -+ -+Finds the TOMSFASTMATH library. -+ -+Imported Targets -+^^^^^^^^^^^^^^^^ -+ -+This module provides the following imported targets, if found: -+ -+``TOMSFASTMATH::tfm`` -+The TOMSFASTMATH library -+ -+Result Variables -+^^^^^^^^^^^^^^^^ -+ -+This will define the following variables: -+ -+``TOMSFASTMATH_FOUND`` -+True if the system has the TOMSFASTMATH library. -+``TOMSFASTMATH_VERSION`` -+The version of the TOMSFASTMATH library which was found. -+``TOMSFASTMATH_INCLUDE_DIRS`` -+Include directories needed to use TOMSFASTMATH. -+``TOMSFASTMATH_LIBRARIES`` -+Libraries needed to link to TOMSFASTMATH. -+ -+Cache Variables -+^^^^^^^^^^^^^^^ -+ -+The following cache variables may also be set: -+ -+``TOMSFASTMATH_INCLUDE_DIR`` -+ The directory containing ``tfm.h``. -+ ``TOMSFASTMATH_LIBRARY`` -+ The path to the TOMSFASTMATH library. 
-+ -+#]=======================================================================] -+ -+if(NOT ENABLE_EXTERNAL_TOMFASTMATH) -+ set(TOMFASTMATH_LIB_NAME "tomsfastmath") -+ set(TOMFASTMATH_BUILTIN 1) -+else() -+ set(TOMFASTMATH_LIB_NAME "tfm") -+ add_definitions(-DHAVE_SYSTEM_TOMSFASTMATH) -+ -+find_package(PkgConfig QUIET) -+pkg_check_modules(PC_TOMSFASTMATH QUIET tomsfastmath) -+ -+find_path(TOMSFASTMATH_INCLUDE_DIR -+ NAMES tfm.h -+ PATHS ${PC_TOMSFASTMATH_INCLUDE_DIRS} -+ PATH_SUFFIXES tfm -+) -+find_library(TOMSFASTMATH_LIBRARY -+ NAMES tfm -+ PATHS ${PC_TOMSFASTMATH_LIBRARY_DIRS} -+) -+ -+set(TOMSFASTMATH_VERSION ${PC_TOMSFASTMATH_VERSION}) -+ -+include(FindPackageHandleStandardArgs) -+find_package_handle_standard_args(TOMSFASTMATH -+ FOUND_VAR TOMSFASTMATH_FOUND -+ REQUIRED_VARS -+ TOMSFASTMATH_LIBRARY -+ TOMSFASTMATH_INCLUDE_DIR -+ VERSION_VAR TOMSFASTMATH_VERSION -+) -+ -+if(TOMSFASTMATH_FOUND) -+ set(TOMSFASTMATH_LIBRARIES ${TOMSFASTMATH_LIBRARY}) -+ set(TOMSFASTMATH_INCLUDE_DIRS ${TOMSFASTMATH_INCLUDE_DIR}) -+ set(TOMSFASTMATH_DEFINITIONS ${PC_TOMSFASTMATH_CFLAGS_OTHER}) -+endif() -+ -+mark_as_advanced( -+ TOMSFASTMATH_INCLUDE_DIR -+ TOMSFASTMATH_LIBRARY -+) -+endif() -diff --git a/libclamav/CMakeLists.txt b/libclamav/CMakeLists.txt -index 6bc426f..0d35e3a 100644 ---- a/libclamav/CMakeLists.txt -+++ b/libclamav/CMakeLists.txt -@@ -3,6 +3,7 @@ - include_directories( - ${LIBXML2_INCLUDE_DIR} - ${OPENSSL_INCLUDE_DIR} -+ ${TOMSFASTMATH_INCLUDE_DIR} - ${ZLIB_INCLUDE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - $ -@@ -166,6 +167,7 @@ target_link_libraries( yara - PCRE2::pcre2 - JSONC::jsonc ) - -+if(TOMFASTMATH_BUILTIN) - add_library( tomsfastmath OBJECT ) - target_sources( tomsfastmath - PRIVATE -@@ -259,6 +261,7 @@ target_include_directories( tomsfastmath - PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ) - set_target_properties( tomsfastmath PROPERTIES - COMPILE_FLAGS "${WARNCFLAGS}" ) -+endif() - - # Bytecode Runtime - add_library( bytecode_runtime OBJECT ) -@@ -525,7 +528,7 @@ 
if(ENABLE_SHARED_LIB) - regex - lzma_sdk - yara -- tomsfastmath -+ ${TOMFASTMATH_LIB_NAME} - bytecode_runtime - ${LIBMSPACK} - ClamAV::libclamav_rust -@@ -635,7 +638,7 @@ if(ENABLE_STATIC_LIB) - regex - lzma_sdk - yara -- tomsfastmath -+ ${TOMFASTMATH_LIB_NAME} - bytecode_runtime - ${LIBMSPACK} - ClamAV::libclamav_rust -diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt -index 567e95e..0122929 100644 ---- a/unit_tests/CMakeLists.txt -+++ b/unit_tests/CMakeLists.txt -@@ -49,7 +49,7 @@ if(ENABLE_APP) - PRIVATE - ClamAV::libclamav - libcheck::check -- tomsfastmath -+ ${TOMFASTMATH_LIB_NAME} - JSONC::jsonc - ${LIBMSPACK} - OpenSSL::SSL -@@ -85,7 +85,7 @@ if(ENABLE_APP) - ClamAV::libclamav - ClamAV::common - libcheck::check -- tomsfastmath -+ ${TOMFASTMATH_LIB_NAME} - JSONC::jsonc - ${LIBMSPACK} - OpenSSL::SSL -@@ -133,7 +133,7 @@ target_link_libraries(check_clamav - PRIVATE - ClamAV::libclamav - libcheck::check -- tomsfastmath -+ ${TOMFASTMATH_LIB_NAME} - JSONC::jsonc - ${LIBMSPACK} - OpenSSL::SSL diff -Nru clamav-1.0.1+dfsg/debian/po/cs.po clamav-1.0.2+dfsg/debian/po/cs.po --- clamav-1.0.1+dfsg/debian/po/cs.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/cs.po 2023-08-23 14:44:37.000000000 +0000 @@ -15,7 +15,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2010-10-13 19:12+0200\n" "Last-Translator: Miroslav Kure \n" "Language-Team: Czech \n" @@ -232,41 +232,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Chcete povolit prohledávání pošty?" - -#. Type: boolean -#. 
Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to load bytecode from the database?" msgid "Do you want to download the bytecode database?" msgstr "Chcete z databáze nahrávat bajtkód?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -280,7 +258,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1453,8 +1431,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." 
msgstr "" "Měli byste se ujistit, že je tato hodnota menší, než hodnota " "„StreamMaxLength“ uvedená v konfiguračním souboru clamd.conf." @@ -1488,6 +1466,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Chcete povolit prohledávání pošty?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Chcete povolit prohledávání archivů?" diff -Nru clamav-1.0.1+dfsg/debian/po/da.po clamav-1.0.2+dfsg/debian/po/da.po --- clamav-1.0.1+dfsg/debian/po/da.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/da.po 2023-08-23 14:44:37.000000000 +0000 @@ -8,7 +8,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-07-06 12:42+0000\n" "Last-Translator: Joe Hansen \n" "Language-Team: Danish \n" @@ -229,47 +229,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Ønsker du at aktivere understøttelse for Google Safe Browsing?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." 
-msgstr "" -"Når aktiveret for første gang vil freshclam hente en ny databasefil " -"(safebrowsing.cvd), som vil blive indlæst automatisk af clamd og clamscan " -"under den næste genindlæsning, så længe at den heuristiske phishing-" -"detektering er tændt. Denne database inkluderer information om " -"internetsider, som kan være phishing-sider eller mulige kilder til malware. " -"Når du bruger denne indstilling, er det krævet at du kører freshclam mindst " -"hver 30. minut. Freshclam bruger ClamAV's spejlinfrastruktur til at " -"distribuere databasen og dets opdateringer, men alt indhold leveres under " -"Googles betingelser for brug." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "Vil du hente bytecode-databasen?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Privat spejl for freshclam:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -291,7 +262,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Ønsker du at aktivere logrotation?" @@ -1491,8 +1462,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." 
msgstr "" "Du bør tjekke at denne værdi er mindre end værdien af »StreamMaxLength« i " "clamd.conf-filen." @@ -1535,6 +1506,30 @@ "Bemærk: Selvom det sikkert er en god ide at aktivere denne indstilling, så " "er standardværdien deaktiveret på grund af arv (legacy)." +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Ønsker du at aktivere understøttelse for Google Safe Browsing?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "Når aktiveret for første gang vil freshclam hente en ny databasefil " +#~ "(safebrowsing.cvd), som vil blive indlæst automatisk af clamd og clamscan " +#~ "under den næste genindlæsning, så længe at den heuristiske phishing-" +#~ "detektering er tændt. Denne database inkluderer information om " +#~ "internetsider, som kan være phishing-sider eller mulige kilder til " +#~ "malware. Når du bruger denne indstilling, er det krævet at du kører " +#~ "freshclam mindst hver 30. minut. Freshclam bruger ClamAV's " +#~ "spejlinfrastruktur til at distribuere databasen og dets opdateringer, men " +#~ "alt indhold leveres under Googles betingelser for brug." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Vil du aktivere ved adgang-skanning?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/de.po clamav-1.0.2+dfsg/debian/po/de.po --- clamav-1.0.1+dfsg/debian/po/de.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/de.po 2023-08-23 14:44:37.000000000 +0000 @@ -14,7 +14,7 @@ msgstr "" "Project-Id-Version: clamav_0.98.4~rc1+dfsg-3_de\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-06-12 10:23+0100\n" "Last-Translator: Mario Blättermann \n" "Language-Team: Deutsch \n" @@ -241,48 +241,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Soll die Unterstützung für Google Safe Browsing aktiviert werden?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"Wenn dies zum ersten Mal aktiviert wird, dann lädt Freshclam eine neue " -"Datenbankdatei (safebrowsing.cvd) herunter, die automatisch von Clamd und " -"Clamscan beim nächsten erneuten Laden berücksichtigt wird, sofern die " -"heuristische Phishing-Erkennung eingeschaltet ist. Diese Datenbank enthält " -"Informationen über Webseiten, die Phishing-Seiten oder mögliche Malware-" -"Quellen sein könnten. 
Bei Verwendung dieser Option ist es vorgeschrieben, " -"dass Freshclam mindestens alle 30 Minuten ausgeführt wird. Freshclam " -"verwendet die Spiegel-Infrastruktur von ClamAV, um die Datenbank und deren " -"Aktualisierungen zu verteilen, aber alle Inhalte werden unter den " -"Nutzungsbedingungen von Google bereitgestellt." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "Wollen Sie die Bytecode-Datenbank herunterladen?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Privater Spiegel für Freshclam:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -306,7 +276,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Soll das Rotieren der Protokolldateien aktiviert werden?" @@ -1532,8 +1502,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Sie sollten sicherstellen, dass dieser Wert kleiner als der Wert von " "»StreamMaxLength« in der Datei clamd.conf ist." @@ -1578,6 +1548,31 @@ "einzuschalten, ist der Standardwert aus historischen Gründen auf »Aus« " "geschaltet." +#~ msgid "Do you want to enable support for Google Safe Browsing?" 
+#~ msgstr "Soll die Unterstützung für Google Safe Browsing aktiviert werden?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "Wenn dies zum ersten Mal aktiviert wird, dann lädt Freshclam eine neue " +#~ "Datenbankdatei (safebrowsing.cvd) herunter, die automatisch von Clamd und " +#~ "Clamscan beim nächsten erneuten Laden berücksichtigt wird, sofern die " +#~ "heuristische Phishing-Erkennung eingeschaltet ist. Diese Datenbank " +#~ "enthält Informationen über Webseiten, die Phishing-Seiten oder mögliche " +#~ "Malware-Quellen sein könnten. Bei Verwendung dieser Option ist es " +#~ "vorgeschrieben, dass Freshclam mindestens alle 30 Minuten ausgeführt " +#~ "wird. Freshclam verwendet die Spiegel-Infrastruktur von ClamAV, um die " +#~ "Datenbank und deren Aktualisierungen zu verteilen, aber alle Inhalte " +#~ "werden unter den Nutzungsbedingungen von Google bereitgestellt." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Soll die Bei-Zugriff-Überprüfung aktiviert werden?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/es.po clamav-1.0.2+dfsg/debian/po/es.po --- clamav-1.0.1+dfsg/debian/po/es.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/es.po 2023-08-23 14:44:37.000000000 +0000 @@ -42,7 +42,7 @@ msgstr "" "Project-Id-Version: clamav 0.96+dfsg-4\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-12-05 18:25+0100\n" "Last-Translator: Javier Fernández-Sanguino Peña \n" "Language-Team: Debian Spanish \n" @@ -288,48 +288,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "¿Desea activar el soporte para la Navegación Segura de Google?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"Cuando se activa por primera vez, freshclam descargará un nuevo archivo de " -"base de datos (safebrowsing.cvd) que se cargará automáticamente por clamd y " -"clamscan en el siguiente reinicio. Esto se realizará siempre que la " -"detección heurística de phishing esté activa. Esta base de datos incluye " -"información de sitios web que pueden ser sitios de robo de credenciales " -"(«phishing») o posibles fuentes de programas maliciosos. 
Es obligatorio " -"ejecutar freshclam al menos cada 30 minutos si se utiliza esta opción. " -"Freshclam utiliza la infraestructura de réplicas de ClamAV para distribuir " -"la base de datos y sus actualizaciones, pero todos los contenidos se ofrecen " -"bajo los términos de uso de Google." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "¿Desea descargar la base de datos de bytecodes?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Réplica privada para freshclam:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -353,7 +323,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "¿Desea activar la rotación de logs?" @@ -1574,8 +1544,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Debería comprobar que este valor es menor que el valor de «StreamMaxLength» " "en el fichero «clamd.conf»." @@ -1620,6 +1590,31 @@ "Nota: Aunque habitualmente sea una buena idea habilitar esta opción, por " "omisión se deshabilita por motivos históricos." +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "¿Desea activar el soporte para la Navegación Segura de Google?" 
+ +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "Cuando se activa por primera vez, freshclam descargará un nuevo archivo " +#~ "de base de datos (safebrowsing.cvd) que se cargará automáticamente por " +#~ "clamd y clamscan en el siguiente reinicio. Esto se realizará siempre que " +#~ "la detección heurística de phishing esté activa. Esta base de datos " +#~ "incluye información de sitios web que pueden ser sitios de robo de " +#~ "credenciales («phishing») o posibles fuentes de programas maliciosos. Es " +#~ "obligatorio ejecutar freshclam al menos cada 30 minutos si se utiliza " +#~ "esta opción. Freshclam utiliza la infraestructura de réplicas de ClamAV " +#~ "para distribuir la base de datos y sus actualizaciones, pero todos los " +#~ "contenidos se ofrecen bajo los términos de uso de Google." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "¿Desea activar el análisis en el momento del acceso?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/eu.po clamav-1.0.2+dfsg/debian/po/eu.po --- clamav-1.0.1+dfsg/debian/po/eu.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/eu.po 2023-08-23 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: clamav-eu\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2008-09-24 11:03+0200\n" "Last-Translator: Piarres Beobide \n" "Language-Team: Euskara \n" @@ -228,41 +228,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Posta eskaneatzea gaitu nahi al duzu?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to log time information with each message?" msgid "Do you want to download the bytecode database?" msgstr "Mezu bakoitzarekin ordu informazioa erregistratzea nahi duzu?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. 
Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -276,7 +254,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1397,8 +1375,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" #. Type: boolean @@ -1430,6 +1408,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Posta eskaneatzea gaitu nahi al duzu?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Artxibo eskaneatzea gaitu nahi al duzu?" diff -Nru clamav-1.0.1+dfsg/debian/po/fi.po clamav-1.0.2+dfsg/debian/po/fi.po --- clamav-1.0.1+dfsg/debian/po/fi.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/fi.po 2023-08-23 14:44:37.000000000 +0000 @@ -3,7 +3,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2009-06-22 22:47+0300\n" "Last-Translator: Esko Arajärvi \n" "Language-Team: Finnish \n" @@ -224,41 +224,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" 
-msgstr "Otetaanko käyttöön sähköpostien tutkinta?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to log time information with each message?" msgid "Do you want to download the bytecode database?" msgstr "Haluatko tallentaa aikatiedon jokaisen viestin yhteydessä?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -272,7 +250,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1464,8 +1442,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." 
+"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Tarkista, että tämä arvo on pienempi kuin tiedostossa clamd.conf " "attribuutille ”StreamMaxLength” asetettu arvo." @@ -1499,6 +1477,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Otetaanko käyttöön sähköpostien tutkinta?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Otetaanko käyttöön arkistojen tutkinta?" diff -Nru clamav-1.0.1+dfsg/debian/po/fr.po clamav-1.0.2+dfsg/debian/po/fr.po --- clamav-1.0.1+dfsg/debian/po/fr.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/fr.po 2023-08-23 14:44:37.000000000 +0000 @@ -10,7 +10,7 @@ msgstr "" "Project-Id-Version: clamav_0.98.4\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-06-10 14:21+0100\n" "Last-Translator: Julien Patriarca \n" "Language-Team: FRENCH \n" @@ -240,36 +240,6 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Souhaitez-vous activer le support de « Google Safe Browsing » ?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. 
Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"Lors du premier lancement, freshclam téléchargera un nouveau fichier de base " -"de données (safebrowsing.cvd) qui sera automatiquement chargé par clamd et " -"clamscan lors du prochain rechargement, si la détection d'hameçonnage " -"heuristique est activée. Cette base de données contient des informations à " -"propos de sites internet qui pourraient être des sites d'hameçonnage ou de " -"possibles sources de logiciels malveillants. Lorsque vous utilisez cette " -"option, il est obligatoire de lancer freshclam au moins toutes les " -"30 minutes. Freshclam utilise l'infrastructure de miroirs de ClamAV pour " -"distribuer la base de données et ses mises à jour mais tout le contenu est " -"fourni selon les conditions d'utilisation de Google." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "" "Souhaitez-vous télécharger la base de données du code intermédiaire " @@ -277,13 +247,13 @@ #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Miroir privé pour freshclam :" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -307,7 +277,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Souhaitez-vous activer la rotation des journaux ?" @@ -1533,8 +1503,8 @@ #. 
Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Vous devez vérifier que cette valeur est inférieure à la valeur de " "« StreamMaxLength » du fichier de configuration clamd.conf." @@ -1578,6 +1548,32 @@ "Bien que ce soit probablement une bonne idée d'activer cette option, pour " "des raisons historiques, la valeur est désactivée par défaut." +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Souhaitez-vous activer le support de « Google Safe Browsing » ?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "Lors du premier lancement, freshclam téléchargera un nouveau fichier de " +#~ "base de données (safebrowsing.cvd) qui sera automatiquement chargé par " +#~ "clamd et clamscan lors du prochain rechargement, si la détection " +#~ "d'hameçonnage heuristique est activée. Cette base de données contient des " +#~ "informations à propos de sites internet qui pourraient être des sites " +#~ "d'hameçonnage ou de possibles sources de logiciels malveillants. Lorsque " +#~ "vous utilisez cette option, il est obligatoire de lancer freshclam au " +#~ "moins toutes les 30 minutes. 
Freshclam utilise l'infrastructure de " +#~ "miroirs de ClamAV pour distribuer la base de données et ses mises à jour " +#~ "mais tout le contenu est fourni selon les conditions d'utilisation de " +#~ "Google." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Souhaitez-vous activer la vérification des archives ?" diff -Nru clamav-1.0.1+dfsg/debian/po/gl.po clamav-1.0.2+dfsg/debian/po/gl.po --- clamav-1.0.1+dfsg/debian/po/gl.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/gl.po 2023-08-23 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2009-05-06 21:40+0200\n" "Last-Translator: marce villarino \n" "Language-Team: Galician \n" @@ -231,41 +231,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Desexa activar o exame do correo?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to log time information with each message?" 
msgid "Do you want to download the bytecode database?" msgstr "Desexa rexistrar a hora con cada mensaxe?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -279,7 +257,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1476,8 +1454,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Debería verificar que este valor é menor que o de «StreamMaxLength» no " "ficheiro clamd.conf." @@ -1511,6 +1489,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Desexa activar o exame do correo?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Desexa activar o exame de arquivos?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/it.po clamav-1.0.2+dfsg/debian/po/it.po --- clamav-1.0.1+dfsg/debian/po/it.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/it.po 2023-08-23 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: clamav 0.96.1+dfsg-3 \n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2011-02-19 15:31+0100\n" "Last-Translator: Luca Monducci \n" "Language-Team: Italian \n" @@ -231,41 +231,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Attivare l'analisi delle email?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to load bytecode from the database?" msgid "Do you want to download the bytecode database?" msgstr "Caricare il bytecode dal database?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. 
Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -279,7 +257,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1465,8 +1443,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Assicurarsi che questo valore sia inferiore al valore di \"StreamMaxLength\" " "nel file clamd.conf" @@ -1500,6 +1478,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Attivare l'analisi delle email?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Attivare la scansione degli archivi?" diff -Nru clamav-1.0.1+dfsg/debian/po/ja.po clamav-1.0.2+dfsg/debian/po/ja.po --- clamav-1.0.1+dfsg/debian/po/ja.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/ja.po 2023-08-23 14:44:37.000000000 +0000 @@ -3,7 +3,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-05-11 06:12+0900\n" "Last-Translator: Kenshi Muto \n" "Language-Team: Japanese \n" @@ -222,46 +222,18 @@ #. Type: boolean #. 
Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Google Safe Browsing を有効にしますか?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"初回の有効化時に freshclam は新しいデータベースファイル (safebrowsing.cvd) を" -"ダウンロードします。このファイルは偽装の発見的検知 (heuristic phishing " -"detection) を有効にしていれば clamd や clamscan が次回リロード時に自動的に読" -"み込みます。このデータベースには、偽装サイトやマルウェアのソースの可能性があ" -"るウェブサイトについての情報が収録されています。このオプションを使う場合は " -"freshclam を最低でも30分に一度は実行することが必須となります。freshclam は " -"ClamAV のミラー基盤を使ってデータベースや更新を配布しますが、その内容は全て" -"が Google の定める利用条件の下で提供されています。" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "バイトコードデータベースをダウンロードしますか?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "freshclam 用プライベートミラー:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -283,7 +255,7 @@ #. Type: boolean #. 
Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "ログファイル切り替えを有効にしますか?" @@ -1462,8 +1434,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "この値が clamd.conf ファイルの \"StreamMaxLength\" 値よりも低いことを確認して" "ください。" @@ -1506,6 +1478,29 @@ "注意: このオプションを有効にするのは恐らく良い選択ではありますが、歴史的理由" "によりデフォルト値は現在 off となっています。" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Google Safe Browsing を有効にしますか?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "初回の有効化時に freshclam は新しいデータベースファイル (safebrowsing." +#~ "cvd) をダウンロードします。このファイルは偽装の発見的検知 (heuristic " +#~ "phishing detection) を有効にしていれば clamd や clamscan が次回リロード時" +#~ "に自動的に読み込みます。このデータベースには、偽装サイトやマルウェアのソー" +#~ "スの可能性があるウェブサイトについての情報が収録されています。このオプショ" +#~ "ンを使う場合は freshclam を最低でも30分に一度は実行することが必須となりま" +#~ "す。freshclam は ClamAV のミラー基盤を使ってデータベースや更新を配布します" +#~ "が、その内容は全てが Google の定める利用条件の下で提供されています。" + #~ msgid "Do you want to enable on-access scanning?" 
#~ msgstr "アクセス時の走査を有効にしますか?" diff -Nru clamav-1.0.1+dfsg/debian/po/nl.po clamav-1.0.2+dfsg/debian/po/nl.po --- clamav-1.0.1+dfsg/debian/po/nl.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/nl.po 2023-08-23 14:44:37.000000000 +0000 @@ -9,7 +9,7 @@ msgstr "" "Project-Id-Version: clamav 0.97.3+dfsg-2\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-09-23 18:02+0200\n" "Last-Translator: Frans Spiesschaert \n" "Language-Team: Debian Dutch l10n Team \n" @@ -237,48 +237,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Wilt u ondersteuning voor 'Google Veilig Surfen' aanzetten?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"De eerste maal dat freshclam geactiveerd wordt, zal het een nieuw " -"databasebestand downloaden (safebrowsing.cvd). Als \"heuristisch opsporen " -"van phishing\" actief is, zullen clamd en clamscan deze database automatisch " -"gaan gebruiken. Het is een database die informatie bevat over websites die " -"misschien wel gebruik maken van phishing of mogelijke verspreiders van " -"malware zijn. 
Indien u van deze optie gebruik maakt, is het verplicht om " -"minstens elke 30 minuten het commando freshclam uit te laten voeren. " -"Freshclam maakt gebruik van de spiegelserverinfrastructuur van ClamAV om de " -"database en aanpassingen eraan aan te bieden, maar alle inhoud wordt " -"aangeboden onder de gebruikscondities die Google stelt." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "Wilt u de bytecodedatabase laden?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Privéspiegelserver voor freshclam:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -302,7 +272,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Wilt u logrotatie activeren?" @@ -1535,8 +1505,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "U moet controleren dat deze waarde lager is dan de waarde van " "\"StreamMaxLength\" in het bestand clamd.conf." @@ -1581,6 +1551,31 @@ "Merk op: hoewel het wellicht een goede zaak is om deze optie aan te zetten, " "staat ze standaard toch uit om historische redenen." +#~ msgid "Do you want to enable support for Google Safe Browsing?" 
+#~ msgstr "Wilt u ondersteuning voor 'Google Veilig Surfen' aanzetten?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "De eerste maal dat freshclam geactiveerd wordt, zal het een nieuw " +#~ "databasebestand downloaden (safebrowsing.cvd). Als \"heuristisch opsporen " +#~ "van phishing\" actief is, zullen clamd en clamscan deze database " +#~ "automatisch gaan gebruiken. Het is een database die informatie bevat over " +#~ "websites die misschien wel gebruik maken van phishing of mogelijke " +#~ "verspreiders van malware zijn. Indien u van deze optie gebruik maakt, is " +#~ "het verplicht om minstens elke 30 minuten het commando freshclam uit te " +#~ "laten voeren. Freshclam maakt gebruik van de spiegelserverinfrastructuur " +#~ "van ClamAV om de database en aanpassingen eraan aan te bieden, maar alle " +#~ "inhoud wordt aangeboden onder de gebruikscondities die Google stelt." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Wilt u de functie scannen-bij-openen aanzetten?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/pl.po clamav-1.0.2+dfsg/debian/po/pl.po --- clamav-1.0.1+dfsg/debian/po/pl.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/pl.po 2023-08-23 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: \n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2012-02-10 16:25+0100\n" "Last-Translator: Michał Kułach \n" "Language-Team: Polish \n" @@ -230,41 +230,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Włączyć skanowanie poczty elektronicznej?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to load bytecode from the database?" msgid "Do you want to download the bytecode database?" msgstr "Ładować kod bajtowy z bazy danych?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. 
Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -278,7 +256,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1470,8 +1448,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Proszę sprawdzić, czy wartość ta jest niższa niż wartość \"StreamMaxLength\" " "w pliku clamd.conf." @@ -1505,6 +1483,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Włączyć skanowanie poczty elektronicznej?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Włączyć skanowanie archiwów?" diff -Nru clamav-1.0.1+dfsg/debian/po/pt_BR.po clamav-1.0.2+dfsg/debian/po/pt_BR.po --- clamav-1.0.1+dfsg/debian/po/pt_BR.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/pt_BR.po 2023-08-23 14:44:37.000000000 +0000 @@ -10,7 +10,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2016-02-23 07:22-0300\n" "Last-Translator: Adriano Rafael Gomes \n" "Language-Team: Brazilian Portuguese \n" "Language-Team: Portuguese \n" @@ -229,48 +229,18 @@ #. Type: boolean #. 
Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Deseja activar o suporte para Google Safe Browsing?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"Quando activado pela primeira vez, o freshclam irá descarregar um novo " -"ficheiro de base de dados (safebrowsing.cvd) o qual será carregado " -"automaticamente pelo clamd e pelo clamscan durante o próximo arranque, " -"contando que a detecção de phishing heurística esteja ligada. Esta base de " -"dados inclui informação acerca de websites que podem ser sites de phishing " -"ou possíveis fontes de malware. Quando se usa esta opção, é obrigatório " -"correr o freshclam a cada 30 minutos no mínimo. O freshclam usa a " -"infraestrutura de mirror do ClamAV para distribuir a base de dados e as suas " -"actualizações mas todos os conteúdos são disponibilizados sob os termos de " -"utilização da Google." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "Deseja descarregar a base de dados de bytecode?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Mirror privado para o freshclam:" #. Type: string #. 
Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -293,7 +263,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Deseja activar a rotação dos relatórios?" @@ -1299,9 +1269,9 @@ "headers will be attached to each processed message, possibly replacing " "existing similar headers." msgstr "" -"Se escolher esta opção, os cabeçalhos \"X-Virus-Scanned\" e \"X-Virus-Status" -"\" serão anexados a cada mensagem processada, possivelmente substituindo " -"cabeçalhos semelhantes já existentes." +"Se escolher esta opção, os cabeçalhos \"X-Virus-Scanned\" e \"X-Virus-" +"Status\" serão anexados a cada mensagem processada, possivelmente " +"substituindo cabeçalhos semelhantes já existentes." #. Type: string #. Description @@ -1496,11 +1466,11 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" -"Você deve verificar que este valor é inferior ao valor de \"StreamMaxLength" -"\" no ficheiro clamd.conf." +"Você deve verificar que este valor é inferior ao valor de " +"\"StreamMaxLength\" no ficheiro clamd.conf." #. Type: boolean #. Description @@ -1542,6 +1512,31 @@ "predefinido está presentemente definido para a desactivar por razões de " "compatibilidades antigas (legacy)." +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Deseja activar o suporte para Google Safe Browsing?" 
+ +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "Quando activado pela primeira vez, o freshclam irá descarregar um novo " +#~ "ficheiro de base de dados (safebrowsing.cvd) o qual será carregado " +#~ "automaticamente pelo clamd e pelo clamscan durante o próximo arranque, " +#~ "contando que a detecção de phishing heurística esteja ligada. Esta base " +#~ "de dados inclui informação acerca de websites que podem ser sites de " +#~ "phishing ou possíveis fontes de malware. Quando se usa esta opção, é " +#~ "obrigatório correr o freshclam a cada 30 minutos no mínimo. O freshclam " +#~ "usa a infraestrutura de mirror do ClamAV para distribuir a base de dados " +#~ "e as suas actualizações mas todos os conteúdos são disponibilizados sob " +#~ "os termos de utilização da Google." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Deseja activar a inspecção no momento de acesso (on-access)?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/ru.po clamav-1.0.2+dfsg/debian/po/ru.po --- clamav-1.0.1+dfsg/debian/po/ru.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/ru.po 2023-08-23 14:44:37.000000000 +0000 @@ -8,7 +8,7 @@ msgstr "" "Project-Id-Version: clamav 0.98.4~rc1+dfsg-3\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2014-06-07 09:00+0400\n" "Last-Translator: Yuri Kozlov \n" "Language-Team: Russian \n" @@ -17,8 +17,8 @@ "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: Lokalize 1.5\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" #. Type: select #. Choices @@ -231,48 +231,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Включить поддержку Google Safe Browsing?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" -"При первом запуске freshclam скачает новый файл базы данных (safebrowsing." 
-"cvd), который автоматически загрузится clamd и clamscan при следующий " -"перезагрузке, и будет включено эвристическое обнаружение сетевого " -"мошенничества (heuristic phishing detection). Данная база данных содержит " -"информацию о веб-сайтах, которые могут быть созданы мошенниками или являются " -"источниками вредоносных программ (malware). При включении данного механизма " -"нужно обязательно запускать freshclam не реже чем 1 раз в 30 минут. " -"Freshclam использует инфраструктуру монитора ClamAV для распространения базы " -"данных и её обновления, но всё содержимое предоставляется на условиях " -"использования Google." - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "Скачивать базу данных байт-кода?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "Частное зеркало freshclam:" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -294,7 +264,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "Включить циклическую перестановку журнала (log rotation)?" @@ -1484,8 +1454,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." 
msgstr "" "Убедитесь, что это значение меньше, чем значение «StreamMaxLength» в clamd." "conf" @@ -1529,6 +1499,31 @@ "Замечание: хотя, вероятно, правильно ответить утвердительно, по умолчанию " "действие выключено в целях обратной совместимости." +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Включить поддержку Google Safe Browsing?" + +#~ msgid "" +#~ "When activated for the first time, freshclam will download a new database " +#~ "file (safebrowsing.cvd) which will be automatically loaded by clamd and " +#~ "clamscan during the next reload, provided that the heuristic phishing " +#~ "detection is turned on. This database includes information about websites " +#~ "that may be phishing sites or possible sources of malware. When using " +#~ "this option, it's mandatory to run freshclam at least every 30 minutes. " +#~ "Freshclam uses the ClamAV's mirror infrastructure to distribute the " +#~ "database and its updates but all the contents are provided under Google's " +#~ "terms of use." +#~ msgstr "" +#~ "При первом запуске freshclam скачает новый файл базы данных (safebrowsing." +#~ "cvd), который автоматически загрузится clamd и clamscan при следующий " +#~ "перезагрузке, и будет включено эвристическое обнаружение сетевого " +#~ "мошенничества (heuristic phishing detection). Данная база данных содержит " +#~ "информацию о веб-сайтах, которые могут быть созданы мошенниками или " +#~ "являются источниками вредоносных программ (malware). При включении " +#~ "данного механизма нужно обязательно запускать freshclam не реже чем 1 раз " +#~ "в 30 минут. Freshclam использует инфраструктуру монитора ClamAV для " +#~ "распространения базы данных и её обновления, но всё содержимое " +#~ "предоставляется на условиях использования Google." + #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Включить сканирование при доступе?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/sv.po clamav-1.0.2+dfsg/debian/po/sv.po --- clamav-1.0.1+dfsg/debian/po/sv.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/sv.po 2023-08-23 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: clamav_0.93~dfsg-1_sv\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2011-06-29 22:37+0100\n" "Last-Translator: Martin Bagge / brother \n" "Language-Team: Swedish \n" @@ -229,41 +229,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Vill du aktivera skanning av e-post?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to load bytecode from the database?" msgid "Do you want to download the bytecode database?" msgstr "Ska bytekod laddas från databasen?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. 
Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -277,7 +255,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1451,11 +1429,11 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" -"Du bör säkerställa att detta värde är lägre än värdet för \"StreamMaxlength" -"\" i filen clamd.conf." +"Du bör säkerställa att detta värde är lägre än värdet för " +"\"StreamMaxlength\" i filen clamd.conf." #. Type: boolean #. Description @@ -1486,6 +1464,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Vill du aktivera skanning av e-post?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" #~ msgstr "Vill du aktivera skanning av arkiv?" 
diff -Nru clamav-1.0.1+dfsg/debian/po/templates.pot clamav-1.0.2+dfsg/debian/po/templates.pot --- clamav-1.0.1+dfsg/debian/po/templates.pot 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/templates.pot 2023-08-23 14:44:37.000000000 +0000 @@ -8,7 +8,7 @@ msgstr "" "Project-Id-Version: clamav\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -191,38 +191,18 @@ #. Type: boolean #. Description #: ../clamav-freshclam.templates:9001 -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 msgid "Do you want to download the bytecode database?" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. 
If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -236,7 +216,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 msgid "Do you want to enable log rotation?" msgstr "" @@ -1269,8 +1249,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" #. Type: boolean diff -Nru clamav-1.0.1+dfsg/debian/po/vi.po clamav-1.0.2+dfsg/debian/po/vi.po --- clamav-1.0.1+dfsg/debian/po/vi.po 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/po/vi.po 2023-08-23 14:44:37.000000000 +0000 @@ -6,7 +6,7 @@ msgstr "" "Project-Id-Version: clamav 0.96.3+dfsg-2\n" "Report-Msgid-Bugs-To: clamav@packages.debian.org\n" -"POT-Creation-Date: 2021-02-21 15:05+0000\n" +"POT-Creation-Date: 2023-08-23 14:47+0000\n" "PO-Revision-Date: 2010-10-27 16:18+0930\n" "Last-Translator: Clytie Siddall \n" "Language-Team: Vietnamese \n" @@ -228,41 +228,19 @@ #. Description #: ../clamav-freshclam.templates:9001 #, fuzzy -#| msgid "Do you want to enable mail scanning?" -msgid "Do you want to enable support for Google Safe Browsing?" -msgstr "Muốn hiệu lực quét thư?" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:9001 -msgid "" -"When activated for the first time, freshclam will download a new database " -"file (safebrowsing.cvd) which will be automatically loaded by clamd and " -"clamscan during the next reload, provided that the heuristic phishing " -"detection is turned on. This database includes information about websites " -"that may be phishing sites or possible sources of malware. 
When using this " -"option, it's mandatory to run freshclam at least every 30 minutes. Freshclam " -"uses the ClamAV's mirror infrastructure to distribute the database and its " -"updates but all the contents are provided under Google's terms of use." -msgstr "" - -#. Type: boolean -#. Description -#: ../clamav-freshclam.templates:10001 -#, fuzzy #| msgid "Do you want to load bytecode from the database?" msgid "Do you want to download the bytecode database?" msgstr "Muốn nạp mã byte từ cơ sở dữ liệu ?" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "Private mirror for freshclam:" msgstr "" #. Type: string #. Description -#: ../clamav-freshclam.templates:11001 +#: ../clamav-freshclam.templates:10001 msgid "" "This option allows you to easily point freshclam to private mirrors. If " "PrivateMirror is set, freshclam does not attempt to use DNS to determine " @@ -276,7 +254,7 @@ #. Type: boolean #. Description -#: ../clamav-freshclam.templates:12001 ../clamav-daemon.templates:22001 +#: ../clamav-freshclam.templates:11001 ../clamav-daemon.templates:22001 #: ../clamav-milter.templates:32001 #, fuzzy #| msgid "Do you want to enable mail scanning?" @@ -1471,8 +1449,8 @@ #. Description #: ../clamav-milter.templates:30001 msgid "" -"You should check that this value is lower than the value of \"StreamMaxLength" -"\" in the clamd.conf file." +"You should check that this value is lower than the value of " +"\"StreamMaxLength\" in the clamd.conf file." msgstr "" "Bạn nên kiểm tra lại giá trị này vẫn còn nhỏ hơn giá trị của « " "StreamMaxLength » (chiều dài luồng tối đa) trong tập tin cấu hình « clamd." @@ -1507,6 +1485,11 @@ msgstr "" #, fuzzy +#~| msgid "Do you want to enable mail scanning?" +#~ msgid "Do you want to enable support for Google Safe Browsing?" +#~ msgstr "Muốn hiệu lực quét thư?" + +#, fuzzy #~| msgid "Do you want to enable archive scanning?" #~ msgid "Do you want to enable on-access scanning?" 
#~ msgstr "Muốn hiệu lực quét kho nén ?" diff -Nru clamav-1.0.1+dfsg/debian/rules clamav-1.0.2+dfsg/debian/rules --- clamav-1.0.1+dfsg/debian/rules 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/rules 2023-08-19 16:44:38.000000000 +0000 @@ -160,3 +160,9 @@ # Don't compress the example configuration files. override_dh_compress: dh_compress -Xexamples + +override_dh_installsystemd: +ifneq (linux, $(DEB_HOST_ARCH_OS)) + dh_installsystemd --name clamav-clamonacc --no-enable --no-start +endif + dh_installsystemd --name clamav-daemon diff -Nru clamav-1.0.1+dfsg/debian/split-tarball.sh clamav-1.0.2+dfsg/debian/split-tarball.sh --- clamav-1.0.1+dfsg/debian/split-tarball.sh 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/split-tarball.sh 2023-08-19 16:44:38.000000000 +0000 @@ -36,9 +36,6 @@ cd clamav-$VERS+dfsg # remove win32 stuff, doesn't make sense to ship it rm -rf win32 -sed -i 's/ win32//' Makefile.am Makefile.in -sed -i 's@libclammspack/config.h:libclammspack/config.h.in@@' configure.ac -find shared -name '*.la' -o -name '*.lo' -o -name Makefile -exec rm {} \; # cleanup llvm set -- libclamav/c++/llvm/utils/lit/lit/*.pyc if [ -f "$1" ] ; then @@ -52,47 +49,18 @@ # remove llvm, we build with the system version #rm -rf libclamav/c++/llvm cp -R libclamunrar_iface $UNRARDIR -cp libclamav/Makefile.am $UNRARDIR/libclamunrar_iface -mv libclamunrar $UNRARDIR -cp -R m4/ $UNRARDIR -cp -R config/ $UNRARDIR -cp platform.h.in $UNRARDIR -cp clamav-types.h.in $UNRARDIR -cp COPYING{,.unrar,.LGPL} $UNRARDIR -cd ../ -tar -cJf $DFSGPKG --numeric-owner clamav-$VERS+dfsg/ -cd $UNRARDIR -echo "Preparing unrar package" -# The sed sorcery below makes sure that the AC_CONFIG_FILES in the unrar package looks -# like: -# AC_CONFIG_FILES([ -# libclamunrar_iface/Makefile -# Makefile -# platform.h -# ]) -# It also removes ltdl, and renames the AC_CONFIG_SRCDIR parameter to an -# existing file. 
- -cp $UNRAR_CONF configure.ac -cat <Makefile.am && -ACLOCAL_AMFLAGS=-I m4 -DISTCLEANFILES = target.h -SUBDIRS = libclamunrar_iface -EOF +# XXX Add the libclamunrar bits -# The complete Makefile.am from libclamav/Makefile.am is huge and we -# only need the libclamunrar pieces. If we keep everything it will -# break for instance due to missing c++ folder or something else. -# The UNRAR block is the first one followed by LLVM so try to remove -# everything after the LLVM block so we should have enough to get the -# complete libclamunrar compiled. -sed -i '/if ENABLE_LLVM/,$d' libclamunrar_iface/Makefile.am +cd ../ +tar -cJf $DFSGPKG --numeric-owner clamav-$VERS+dfsg/ +echo "missing clamunrar, you need to look at that." +exit 0 -autoreconf -rm -r autom4te.cache -cd .. -tar -cJf $UNRARPKG --numeric-owner libclamunrar-$VERS/ +#cd $UNRARDIR +#echo "Preparing unrar package" +#cd .. +#tar -cJf $UNRARPKG --numeric-owner libclamunrar-$VERS/ printf "Test archives?" read yes diff -Nru clamav-1.0.1+dfsg/debian/upstream/signing-key.asc clamav-1.0.2+dfsg/debian/upstream/signing-key.asc --- clamav-1.0.1+dfsg/debian/upstream/signing-key.asc 2023-07-30 21:11:40.000000000 +0000 +++ clamav-1.0.2+dfsg/debian/upstream/signing-key.asc 2023-08-19 16:44:38.000000000 +0000 @@ -1,51 +1,63 @@ -----BEGIN PGP PUBLIC KEY BLOCK----- -mQINBGBjkiwBEADgJTEabt5zCareK9pJJswGU62smrq3uOaaDhtgztj3bxRY/UGT -jypxMee1S/fGWQZQy52lFOXLud5gFC5QU8Yk+7EAsh2ZJSKtWUw8/iMxZ4vsrKVV -QQRLTqMUY16R6/8UzdIT/hD6CbgWgiXF4NH5AGleNqjkF4TXrGof0AK0veekZYJV -WWStqJR/cIiG0nxDQ87RWfeZgrULZmA8uii22po7rGGzxT0byb83dKK+7IoJ/6B/ -ZlI0PmzuJ9/Xp6Mmm//sdPEqRwedt2aGrvtdF79xYJ1tDhOVMpID0aPdURBwlliq -fyKGaIUEa1ke+Dy7sQF8i3zY7ce6PZOtbsts9xsJLvF98VhRsFy0vProPv1mVbiU -PoxxPTnyLeGUm27amIMl4NfX4a8Hdu+ExzKprqWo3Ir08HQzNt6QoFghDIpi9nm4 -k327CJzJv/g2dq5kY/KU6wFHbdH3zP7u+p9DDqKJYFebPCvwM1hMxPdLqemTsfob -kJ4iXcAXjpMqwXX9m0lyQcRHdIdc99yyCUMdPNfapLgY7rOahsS16795/5KSrCuF -h2RcoAWUjh6sGjgGIY4Hy1qQwp3t6X/L6TOhDkBDWId5bTKFR9NqrVprOVsUutbs 
-0TOqLyH4GXCpE9vzg8DX7FTdRiCTpbyQ7VuSxRN/vAyVRP4chrABNfvh/QARAQAB +mQINBGQPO58BEACsF0vtWepeSZRklvCG170RKuZL+9aH8U3zVVtQgDlmcboVRiFf ++fgraQCRVh8cbRM76mqqGoMT0BlwZ1OfrzpZcrNUg5uAgok51P7SoCy3zummnv4M +TadwDLEHNf/38HSnrJe196IiwMEtyuKMGDfzyjQnr357Owem+7FgT2/sU7XwWD2B ++tn/yhbw+HpJuUjdmxmEqJr/4okRSj9OSWV+EFhS9owMNK8zntwHkJzmv4ctS1Ak +Zryh/J3jEnPqzSJDsH729XzKpG4BxCxnybP5WuMsJuNvSlVhVko1PaSi84Dy003w +WoQIgtQHNm6i8CcetrpNCULELTU8sViwdBQXIlGjCa3N+dq1ZOErasp4QzlCVOus +iOkm1KltvbJWPfVDW0A0Z4mP19YRlQTc0jn4w9R5ROmwcLf6Co8nBD2AV8MFjVJA +E21Mfj6LfksplmYg/DEa4kCe8KYPSATq6LFSf+o96fkmnsZovOi6zZ6RtV9l4Aya +pkcvk9iO2cvJMDYJ6iA2dC8EHC2m1tt1Rs2abJqOmsUJATo7MUpK7MD7NyhVvkjJ +j5QRES25uV4OY9ck091GB+XXAf3gGf3Pi2jop1gauGoxyBqLT4SkwqsnsrFF8eEh +A8UdBmo4K6MWFaxw6JsBPpIM63Qe848RzlQRanxS2n50ZZwMLIJrI2MEFQARAQAB tDtUYWxvcyAoVGFsb3MsIENpc2NvIFN5c3RlbXMgSW5jLikgPHJlc2VhcmNoQHNv -dXJjZWZpcmUuY29tPokCPgQTAQIAKAUCYGOSLAIbAwUJA8JnAAYLCQgHAwIGFQgC -CQoLBBYCAwECHgECF4AACgkQYJsCTys+3QfbLg//eZ0yCLr957FtztVlLIHYLpJn -LIl8m+hu3KeUTIwvMoCLiw48cWqFZaJS9PTmrraSj5SKMDnAYFl4O0fhHfQiWDjb -sZ32hQni1PcqxoXqSnkXD7mXjcPH2WuNnQM5WZoAD2VmksqRT57I/K2omW/sjaVe -Nbq3GSOy8WThibswxzioDHtTPFa0/Ah2qq8OkcVJuTwCS1xkLijJc3jx/pOBHWFA -BA4VX5pwcSou/woJ+ySsgBGEo5hOsd0r7h3a0O8EiuGulHTqQt87rVWGv0JKhnub -FULr/ld8+d1zGvJL3OzFG6udjWjw3QqsLDZa94G1ksZWgqr/RgexlSYuxPW+lKUC -QkgotLaEKQC4cpBLRcJEjWyrf4IjoJvkFrUtPsVH9VStICUQATyXARNVWbnJHq3Y -qynCXSB4NZvdo9BF6Tx3FA+ZUjK4/X/UsjL/Hmv99huBctQsWL7gQCoSw9YOt4qs -/As6fgPaNpYb9woJqNMEQNmrhfnnX9PGaM5dM769/E5vF67mkhBNqVJ0+4gyrpTU -T7Pmavrc3T4aSSde8eG6zSlmW8wM5xELfK5TeTexBKGAaDV8c2BkfenRO8OvBSvr -Gz+Xp/YzO9uGUPnbMsTVtxClmzmEj/MVpvtRdEo+dbVOSy8nk3XCu7jMjpojggPv -YQ+4CZYxYpW1T2hSFxG5Ag0EYGOSLAEQAM5kdheiwStznKiaIWaO+0PBA8bAv2xG -7qW/Di85xdcH9miHZM9+lx/iZoOBC9wZC9eatV4Hcukff700a/LGZSYVDvHvdEWb -Tv6ZwvHzbxuc1Kv8cLYopRUfOAwMYOmXriMLxVmd3fcfPNsfPRqfkaZRdkm7qTbP -DeKpSL157HbUG64Eej3cOViq49Hy9L6jtfjtZVxX7OavjnEpyezG6qSIAkvD6O7J -Yg3yfkr4sa44qohq9lDfjWpoXMebu0WsIyW11hm+7KMrBMHjlNgXppu0+ryeKfQi 
-FjPDBd9aflnHy2e8aHef9S5349thNGzjV3TNMV6A6oAN2XQ7pgj5DTwMZtHFCjdE -HIyfGCAgQQL0/MaFzKwuw/l/m31smZgItAZXYY1xoC2gh7LTPZ/3t2VVVof4TNXD -c+pUNgY6bwPBksuhsX8qsldDr5q3jdHZsjlycpL38Z4EZNg3BqxJlVseB395ZOQ6 -FCtHGh6rpsYQZDj1QWcUyev8NHSbSNRMS2/Nn5bT3KgEWEWrmOxp3iMmunBqmnt2 -/xJ83PKRTbSKgcG+Y/+DtnleHpRueRUPC/5XX0DNznSjF10vAh4XtBKGBNaHU9Vv -nMXlYeJ9kCMdSs7cM4FfLFMtPkFwpDYhvQRAEwt11RV6bGo5ZCgGrHGIBlNk6ZSO -1hP15hUtkWU7ABEBAAGJAiUEGAECAA8FAmBjkiwCGwwFCQPCZwAACgkQYJsCTys+ -3QfI7Q//Sb2yotfcsG5Q2FkHRBE85su01c6pewImV9bofNhATSQ37yVHUDrchm+k -Y6Pq5Tdgg+eAMcYz2yv9JhFxJyzgI0viQrkjD7oXeRTGZ0CvzxHhTakAOADXAnYt -wmJglEBTCCbUZ968kQkdBxEaUjVWPCMyIceRr8kUfiCjX51+DLESy8b5lOBhprO6 -vDukk/rmDruIpJPhJ3f89gsp2Ry7gk7a5ENIuVEElLK6OPBZhC3dDZwsvm5CYb62 -+U/b1xtmElpgGbNJCjxvAZiJ0WN2zfBXan+SJ4I9NFUw9jvSURvDV24s4YPhkbZu -OIqQEEYF8QMZ1VJlsr7BoWIXrdKDNJbmEVyx3UiYXKD1BVXCQADPu8G8EPuo4yAf -WymJAOJbAqNF2Op6+sC7/v8Xcgc3PGGyu23cZwikfCAgV+beywTPI5+eVV5F/rpx -XOlvNxT0NOg3UOeQ9GvCbD5ZcuDzmhqso0eMABeq5K5XB12xlWNaTZsIt8Dim4uK -aKMGeB+6iygkHITbay0sMUo0dX6nT27bjX5dTBo/vnVAPYuCS6rh8ojalR1fYFKA -1zdeSaJ2EW5KmgC9yedylSbHdQ+LjSY3t/Ut4RYaekIDeGmVoQkJkL7gIAs8NOYw -G3ayr0AtmeMagAMy94NH5ufVgFk+QPmXpzS7rMLQ3Is1ZOuWNrQ= -=gazS +dXJjZWZpcmUuY29tPokCPgQTAQIAKAUCZA87nwIbAwUJA8JnAAYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQzODf0h7Bqb8gjw/9FYbuwzBjuVCVhHfoY4rfCDoj +eh3NVaTdHIWO1yp6JSM/ny+Z3wDzZLtyQlBcnaJlerncS961iOEG2gBA3v8fZudN +JFpvRC1gxd9IEhGXIDDg+BeOAJUbY9LQTc/dnzWBB04nun20+lM/Rad2BlkQ+YSz +uRUaFsgk0lQPCSDQfoahtoap14jWFsa19aOjTXhAF1MGEDXuoCXM6ByH6wJjtz+z +QJrXvmHS4v8yh8z/pibLGV7IgNrtoW2ej4jFadzEEn/MDajI+5N3C2w5rD41L7Lm +j1uCIBe1G54fgJSstvBxZcnAj9qTF2FBBUpQ1q/ONFfUjpAGQKG2qh1UNBiOZNS3 +gDVN2T8h083WRN2gQvNJnJwXaF4Nm6zhmX4sUqE9nexUrDF8VG8xXJwPgZijaHPV +nZdgDZvQ47BKiJOUj80O9/qYyWo89pX6Rr/YmfbURhRe/kiPon9kIVFCzDDFPniJ +svICjpdkz7wZ0kUN+L7BtDQJfjFjTJPNA2nOV6l64DcdCiyutOFSz4Zf85GoT9wK +Mqv1UmpLwsq2FnF+Gpk1GLZCLprSCu3n16pr+gdRshnE93cvJbMGlP0+jcuFF5hr +Lsvujl7O81JrIjmGXrulHHpdrZQ4J2A3UpDDc60DOHG9ubnBnN7k2kQPY+9a1rzf 
+WPkMQKaxVo3uH1XRO/GJAhwEEAECAAYFAmQPQKgACgkQYJsCTys+3QcvuA//cuJX +LDfsGn9pWTCU83cF6eiQ5Id5FPKldyhSqYRgavgRov0fwD6ZU79dpURf+YsWxxtI +pIntn9hUgSgmdyUw+0GcAmFq6gJOQxWY2nij6X0A9Pskr2qW+WhMGKKVbYez65qw +fgGdlDFT/4nzVBGpIlRGGuOC0aT3jDhBXbp8Eusxi+5He7Kx2Chem7kCX9xBpUYS +FrujMlaMs8O1bsBW3xTWLpHhX6O6bpEY8zDfWavSAqCmzw5RtytAJWsAG1clU9AK +FwSKC+10ODo5VFzmRSgF727Gtuow1WnPhFM/7Cn+M+knCTm2vRz6Vz29/a6DUrZl +CbyKGPR8a9C3UG4VT8C3+fi1boZ+/trUw27YtrKp70FDy3UdgLDF2eO9B77vs35n ++hf2EipG407CGBqb8q6boOdxC0BN/Fcy30Oms4DSUTqEiqvSA/35BhyGfOmJb5tt +kMEHLPveJvilICKBMQdYHemR3mk+muzAO7+y4VOKl+rP0xXCp6y6PAiEu14lzxzI +isQu6omEJBOUiad2iZz+4OUU1Dil0YgUpNgJQyKaDUOR0MSzFU9IM5pzZJ14XkdG +6iriPEX1V9SlfZlaJDNlN11vFlVFeu02vJTcddAaHYad2tKD09GAEuZkib0ToWxz +S+4cBxojti6vMUHVSIlbov7ZMHd/WMqQUb1tSl65Ag0EZA87nwEQALkEL5rxEnv7 +rcwcF3KwcppfHTWjkTV0dyMmE/kLf9e3QnMdCaiZMypxmYipOe9Z/9G6YGH+Qujp +N0mzenNgKljs961VTbOUYTusgwTz1qFienX8lg+eYRQIpqPjisb1xGlISojI7vWO +FZT/LrxVI6Y+HLSXkZjPD7TqyefgOlP2YchmFAjC/e+rtKAZ+FLlguotvDRxl/zp +AA8LLFup8Y8+BvQIWiy6jwwAjJMiJdwBtUz1OxpMuGU/C6bWCkAAFKjhC5F9JQEI +9jHh7/cQEGabDmjIGfywj9jniJrP79hrLfuryFvo6qbw7EwirJbKpoHJwS03ei29 +Uwttw2Dn41dZ0MvjfpYwI61cE5NpvKCBJkkEho6SDXGvLABerEu3ASGlYybQOzrg +aHO9AxGXgD2tFjI0NNunVxy/0KQ+kWcdQ1p/dk/O2U6w5CfFHU68aZgAxmj7jngx +YKjs3IAUy8mwkxtyyFiLJ3E19NdB8+t0cjJMtDVtXOgmoi7HaP8RghdaitaI4q/z +ocIAWhJhN7IkzrYWJ/Bkq4j0doKmaDR8GPP3i5Keg1c1z4yGX1c9MWTMy49l5Nwl +/bUjUiIRocCc33dZCqL5KPMBdtLJOUiIG/KZoMqr6Ozxyriv4Nn/CT2/SSvatYtP +SN91kt61c2FmoBBSltiFwncbUVmB3HmDABEBAAGJAiUEGAECAA8FAmQPO58CGwwF +CQPCZwAACgkQzODf0h7Bqb/ueQ/7BofldLW0/GqvTMEDnysUB/tchWzae6LnBeur +EhIB6smOVkMiuzrRLl2/vFVmv6H1UZK2fRPpaI/3V2mg+ML5ioVVgBrg3IQxcDpY +sYiictUFXJQ9y/ygAl8zxbkE4v4BWAwk5kIFWw1q/sb3IUc07GeK16PLY0+ocPdV +vMyiV8w5wKBlkyPwdntjuJEyfU3lsIeR2iBcQe4HL1Y0/pm6Ilpn+uj2ZYlYZzhN +zBuLy9HB3it161KP/RyxWNB1AEAAx8Mh0IhHOEWLvbfjHJxkJ2GX0TgL5wa45l2a +3clP4Dw2MpLfzIHs+CxG7t6IdSvoX1+0gZPvmo9JXDsLNa7+uu/lcCUjXY9TWdvc +VIZRwlSBQQC8WnGpbkvsBDsJ2BskPWOmv0ol3aiiekJJhVT1K9M1ZwDGX1ts8hLr 
+mf0kCFDq0RImCg6WZAM6z3Fg/1pPGPRktJ4tmSui3GYzrVA34gTunvlqPYKCFYHA +EdUdqycz7UAroj7k3OndZGnnT2r/qKaIYF53/u+6SXM/lUSrJfwxG9eXiw80P/YW +K9VjT3CbQA74vz7pC1bxpYDas6w39DRpkYR1bn1GIhmJhK2CUj5FQla+opVN2Wmg +sk0O7hoet7RDvKpoUyBHxHOJseDQEzWc38bOxD+x0vz/MirBnLdBx8g836tgqy7h +ab6V2qU= +=X+5e -----END PGP PUBLIC KEY BLOCK----- diff -Nru clamav-1.0.1+dfsg/.gitattributes clamav-1.0.2+dfsg/.gitattributes --- clamav-1.0.1+dfsg/.gitattributes 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/.gitattributes 2023-08-15 22:24:07.000000000 +0000 @@ -9,6 +9,9 @@ # Files that should be left untouched (binary is macro for -text -diff) *.ref binary +# Preserve signature for .cargo/vendor files (from the tarabll) ++/.cargo/vendor binary + # # Exclude files from exporting # diff -Nru clamav-1.0.1+dfsg/.gitignore clamav-1.0.2+dfsg/.gitignore --- clamav-1.0.1+dfsg/.gitignore 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/.gitignore 2023-08-15 22:24:07.000000000 +0000 @@ -228,9 +228,5 @@ debug/ target/ -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock - # These are backup files generated by rustfmt **/*.rs.bk diff -Nru clamav-1.0.1+dfsg/Jenkinsfile clamav-1.0.2+dfsg/Jenkinsfile --- clamav-1.0.1+dfsg/Jenkinsfile 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/Jenkinsfile 2023-08-15 22:24:07.000000000 +0000 @@ -10,7 +10,7 @@ parameters( [ string(name: 'VERSION', - defaultValue: '1.0.1', + defaultValue: '1.0.2', description: 'ClamAV version string'), string(name: 'FRAMEWORK_BRANCH', defaultValue: '1.0', @@ -37,7 +37,7 @@ defaultValue: 'fuzz-regression-1.0', description: 'test-pipelines branch for fuzz regression tests'), string(name: 'FUZZ_CORPUS_BRANCH', - defaultValue: 'master', + defaultValue: '1.0', description: 'private-fuzz-corpus branch'), string(name: 'APPCHECK_PIPELINE', defaultValue: 'appcheck-1.0', diff -Nru 
clamav-1.0.1+dfsg/libclamav/autoit.c clamav-1.0.2+dfsg/libclamav/autoit.c --- clamav-1.0.1+dfsg/libclamav/autoit.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/autoit.c 2023-08-15 22:24:07.000000000 +0000 @@ -761,6 +761,10 @@ cli_dbgmsg("autoit: file is compressed\n"); if (cli_readint32(UNP.inputbuf) != 0x35304145) { cli_dbgmsg("autoit: bad magic or unsupported version\n"); + // Free this inputbuf and set back to NULL. + free(UNP.inputbuf); + UNP.inputbuf = NULL; + continue; } @@ -769,6 +773,10 @@ } if (cli_checklimits("autoit", ctx, UNP.usize, 0, 0) != CL_CLEAN) { + // Free this inputbuf and set back to NULL. + free(UNP.inputbuf); + UNP.inputbuf = NULL; + continue; } @@ -848,12 +856,16 @@ */ cli_dbgmsg("autoit: file is not compressed\n"); UNP.outputbuf = UNP.inputbuf; - UNP.usize = UNP.csize; + UNP.inputbuf = NULL; + + UNP.usize = UNP.csize; } if (UNP.usize < 4) { cli_dbgmsg("autoit: file is too short\n"); free(UNP.outputbuf); + UNP.outputbuf = NULL; + continue; } diff -Nru clamav-1.0.1+dfsg/libclamav/bytecode_api.h clamav-1.0.2+dfsg/libclamav/bytecode_api.h --- clamav-1.0.1+dfsg/libclamav/bytecode_api.h 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/bytecode_api.h 2023-08-15 22:24:07.000000000 +0000 @@ -165,6 +165,7 @@ FUNC_LEVEL_1_0 = 160, /**< LibClamAV release 1.0.0 */ FUNC_LEVEL_1_0_1 = 161, /**< LibClamAV release 1.0.1 */ + FUNC_LEVEL_1_0_2 = 162, /**< LibClamAV release 1.0.2 */ }; /** diff -Nru clamav-1.0.1+dfsg/libclamav/hfsplus.c clamav-1.0.2+dfsg/libclamav/hfsplus.c --- clamav-1.0.1+dfsg/libclamav/hfsplus.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/hfsplus.c 2023-08-15 22:24:07.000000000 +0000 @@ -1323,6 +1323,11 @@ stream.next_out = uncompressed_block; extracted_file = true; + + if (stream.avail_in > 0 && Z_STREAM_END == z_ret) { + cli_dbgmsg("hfsplus_walk_catalog: Reached end of stream even though there's still some available bytes left!\n"); + break; + } } } else { if 
(cli_writen(ofd, &block[streamBeginning ? 1 : 0], readLen - (streamBeginning ? 1 : 0)) != readLen - (streamBeginning ? 1 : 0)) { diff -Nru clamav-1.0.1+dfsg/libclamav/matcher-ac.c clamav-1.0.2+dfsg/libclamav/matcher-ac.c --- clamav-1.0.1+dfsg/libclamav/matcher-ac.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/matcher-ac.c 2023-08-15 22:24:07.000000000 +0000 @@ -2951,6 +2951,7 @@ cli_warnmsg("cli_ac_addsig: cannot use filter for trie\n"); MPOOL_FREE(root->mempool, root->filter); root->filter = NULL; + return CL_EMALFDB; } /* TODO: should this affect maxpatlen? */ diff -Nru clamav-1.0.1+dfsg/libclamav/matcher-bm.c clamav-1.0.2+dfsg/libclamav/matcher-bm.c --- clamav-1.0.1+dfsg/libclamav/matcher-bm.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/matcher-bm.c 2023-08-15 22:24:07.000000000 +0000 @@ -72,6 +72,7 @@ cli_warnmsg("cli_bm_addpatt: cannot use filter for trie\n"); MPOOL_FREE(root->mempool, root->filter); root->filter = NULL; + return CL_EMALFDB; } /* TODO: should this affect maxpatlen? 
*/ } diff -Nru clamav-1.0.1+dfsg/libclamav/readdb.c clamav-1.0.2+dfsg/libclamav/readdb.c --- clamav-1.0.1+dfsg/libclamav/readdb.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/readdb.c 2023-08-15 22:24:07.000000000 +0000 @@ -4862,9 +4862,11 @@ if (fs) fclose(fs); - if (engine->cb_sigload_progress) { - /* Let the progress callback function know how we're doing */ - (void)engine->cb_sigload_progress(engine->num_total_signatures, *signo, engine->cb_sigload_progress_ctx); + if (CL_SUCCESS == ret) { + if (engine->cb_sigload_progress) { + /* Let the progress callback function know how we're doing */ + (void)engine->cb_sigload_progress(engine->num_total_signatures, *signo, engine->cb_sigload_progress_ctx); + } } return ret; diff -Nru clamav-1.0.1+dfsg/libclamav/rtf.c clamav-1.0.2+dfsg/libclamav/rtf.c --- clamav-1.0.1+dfsg/libclamav/rtf.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/rtf.c 2023-08-15 22:24:07.000000000 +0000 @@ -168,9 +168,11 @@ /* grow stack */ struct rtf_state* states; stack->stack_size += 128; - states = cli_realloc2(stack->states, stack->stack_size * sizeof(*stack->states)); - if (!states) + states = cli_realloc(stack->states, stack->stack_size * sizeof(*stack->states)); + if (!states) { + // Realloc failed. Note that stack->states has not been freed and must still be cleaned up by the caller. 
return CL_EMEM; + } stack->states = states; } stack->states[stack->stack_cnt++] = *state; diff -Nru clamav-1.0.1+dfsg/libclamav/scanners.c clamav-1.0.2+dfsg/libclamav/scanners.c --- clamav-1.0.1+dfsg/libclamav/scanners.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/scanners.c 2023-08-15 22:24:07.000000000 +0000 @@ -1632,7 +1632,8 @@ char *hash = NULL; char path[PATH_MAX]; char filename[PATH_MAX]; - int tempfd = -1; + int tempfd = -1; + char *tempfile = NULL; if (CL_SUCCESS != (ret = uniq_get(U, "dir", 3, &hash, &hashcnt))) { cli_dbgmsg("cli_ole2_tempdir_scan_vba_new: uniq_get('dir') failed with ret code (%d)!\n", ret); @@ -1649,7 +1650,7 @@ if (CL_SUCCESS == find_file(filename, dir, path, sizeof(path))) { cli_dbgmsg("cli_ole2_tempdir_scan_vba_new: Found dir file: %s\n", path); - if ((ret = cli_vba_readdir_new(ctx, path, U, hash, hashcnt, &tempfd, has_macros)) != CL_SUCCESS) { + if ((ret = cli_vba_readdir_new(ctx, path, U, hash, hashcnt, &tempfd, has_macros, &tempfile)) != CL_SUCCESS) { // FIXME: Since we only know the stream name of the OLE2 stream, but not its path inside the // OLE2 archive, we don't know if we have the right file. The only thing we can do is // iterate all of them until one succeeds. @@ -1693,6 +1694,14 @@ close(tempfd); tempfd = -1; + + if (tempfile) { + if (!ctx->engine->keeptmp) { + remove(tempfile); + } + free(tempfile); + tempfile = NULL; + } } hashcnt--; @@ -1704,6 +1713,14 @@ tempfd = -1; } + if (tempfile) { + if (!ctx->engine->keeptmp) { + remove(tempfile); + } + free(tempfile); + tempfile = NULL; + } + return ret; } diff -Nru clamav-1.0.1+dfsg/libclamav/vba_extract.c clamav-1.0.2+dfsg/libclamav/vba_extract.c --- clamav-1.0.1+dfsg/libclamav/vba_extract.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/vba_extract.c 2023-08-15 22:24:07.000000000 +0000 @@ -358,7 +358,7 @@ * Read a VBA project in an OLE directory. * Contrary to cli_vba_readdir, this function uses the dir file to locate VBA modules. 
*/ -cl_error_t cli_vba_readdir_new(cli_ctx *ctx, const char *dir, struct uniq *U, const char *hash, uint32_t which, int *tempfd, int *has_macros) +cl_error_t cli_vba_readdir_new(cli_ctx *ctx, const char *dir, struct uniq *U, const char *hash, uint32_t which, int *tempfd, int *has_macros, char **tempfile) { cl_error_t ret = CL_SUCCESS; char fullname[1024]; @@ -367,7 +367,6 @@ size_t data_len; size_t data_offset; const char *stream_name = NULL; - char *tempfile = NULL; uint16_t codepage = CODEPAGE_ISO8859_1; unsigned i; char *mbcs_name = NULL, *utf16_name = NULL; @@ -375,7 +374,7 @@ unsigned char *module_data = NULL, *module_data_utf8 = NULL; size_t module_data_size = 0, module_data_utf8_size = 0; - if (dir == NULL || hash == NULL || tempfd == NULL || has_macros == NULL) { + if (dir == NULL || hash == NULL || tempfd == NULL || has_macros == NULL || tempfile == NULL) { return CL_EARG; } @@ -398,12 +397,12 @@ *has_macros = *has_macros + 1; - if ((ret = cli_gentempfd_with_prefix(ctx->sub_tmpdir, "vba_project", &tempfile, tempfd)) != CL_SUCCESS) { + if ((ret = cli_gentempfd_with_prefix(ctx->sub_tmpdir, "vba_project", tempfile, tempfd)) != CL_SUCCESS) { cli_warnmsg("vba_readdir_new: VBA project cannot be dumped to file\n"); goto done; } - cli_dbgmsg("Dumping VBA project from dir %s to file %s\n", fullname, tempfile); + cli_dbgmsg("Dumping VBA project from dir %s to file %s\n", fullname, *tempfile); #define CLI_WRITEN(msg, size) \ do { \ @@ -1305,9 +1304,6 @@ if (stream_name) { free((void *)stream_name); } - if (tempfile) { - free(tempfile); - } if (ret != CL_SUCCESS && *tempfd >= 0) { close(*tempfd); *tempfd = -1; @@ -1972,7 +1968,7 @@ uint32_t len __attribute__((packed)); uint32_t state __attribute__((packed)); uint32_t offset __attribute__((packed)); - } * m; + } *m; const struct macro *n; #ifdef HAVE_PRAGMA_PACK #pragma pack() diff -Nru clamav-1.0.1+dfsg/libclamav/vba_extract.h clamav-1.0.2+dfsg/libclamav/vba_extract.h --- clamav-1.0.1+dfsg/libclamav/vba_extract.h 
2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/vba_extract.h 2023-08-15 22:24:07.000000000 +0000 @@ -40,7 +40,7 @@ } vba_project_t; vba_project_t *cli_vba_readdir(const char *dir, struct uniq *U, uint32_t which); -cl_error_t cli_vba_readdir_new(cli_ctx *ctx, const char *dir, struct uniq *U, const char *hash, uint32_t which, int *tempfd, int *has_macros); +cl_error_t cli_vba_readdir_new(cli_ctx *ctx, const char *dir, struct uniq *U, const char *hash, uint32_t which, int *tempfd, int *has_macros, char **tempfile); vba_project_t *cli_wm_readdir(int fd); void cli_free_vba_project(vba_project_t *vba_project); diff -Nru clamav-1.0.1+dfsg/libclamav/xlm_extract.c clamav-1.0.2+dfsg/libclamav/xlm_extract.c --- clamav-1.0.1+dfsg/libclamav/xlm_extract.c 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav/xlm_extract.c 2023-08-15 22:24:07.000000000 +0000 @@ -4994,6 +4994,9 @@ FREE(data); + if (tempfile && !ctx->engine->keeptmp) { + remove(tempfile); + } FREE(tempfile); return status; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/build.rs clamav-1.0.2+dfsg/libclamav_rust/build.rs --- clamav-1.0.1+dfsg/libclamav_rust/build.rs 2023-02-13 06:00:26.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/build.rs 2023-08-15 22:24:07.000000000 +0000 @@ -185,7 +185,7 @@ // LLVM is optional, and don't have a path to each library like we do with the other libs. 
let llvm_libs = env::var("LLVM_LIBS").unwrap_or("".into()); - if llvm_libs != "" { + if !llvm_libs.is_empty() { match env::var("LLVM_DIRS") { Err(env::VarError::NotPresent) => eprintln!("LLVM_DIRS not set"), Err(env::VarError::NotUnicode(_)) => return Err("environment value not unicode"), @@ -202,7 +202,7 @@ llvm_libs .split(',') - .for_each(|filepath_str| match parse_lib_path(&filepath_str) { + .for_each(|filepath_str| match parse_lib_path(filepath_str) { Ok(parsed_path) => { println!("cargo:rustc-link-search={}", parsed_path.dir); eprintln!(" - requesting that rustc link {:?}", &parsed_path.libname); @@ -281,7 +281,7 @@ // Parse a library path, returning the portion expected after the `-l`, and the // directory containing the library -fn parse_lib_path<'a>(path: &'a str) -> Result { +fn parse_lib_path(path: &str) -> Result { let path = PathBuf::from(path); let file_name = path .file_name() diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/.cargo-checksum.json 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"c9b1b15e299ba4e6ed0d6f25cde30b26b13b6068a7fbd980000c37bca19b0104","DESIGN.md":"64ff45ea2a89d4c32b29af91acb7743a861fcac417cb94fde8e6559405d603b2","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"5999e5768f5da8ab9b50c016766b5185b4c79936c56bef6d311ddcb0a38c4b94","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/ahocorasick.rs":"b92c9a65c4ee8029ff5a710aa1514caf838e73072c177dff5375463769f0b1ce","src/automaton.rs":"931af0aad03079bc4f6400d573fce832ce1edeeaf196815a16750d57b54b2183","src/buffer.rs":"dae7ee7c1f846ca9cf115ba4949484000e1837b4fb7311f8d8c9a35011c9c26f","src/byte_frequencies.rs":"2fb85b381c038c1e44ce94294531cdcd339dca48b1e61f41455666e802cbbc9e","src/classes.rs":"99a53a2ed8eea8c13699def90e31dfdff9d0b90572b1db3cb534e3396e7a0ed0","src/dfa.rs":"25e4455b3e179a7e192108d05f3683993456b36e3ebed99f827558c52525b7e6","src/error.rs":"d34c2c9c815df5d9dedc46b4b3ce109cd2cee07825de643f0c574ec960367beb","src/lib.rs":"7a47d4c87f83e0e7ddf0777a71e4858904e73477ce18404cb89e656070e86aef","src/nfa.rs":"3b817b4aa85540e8c0d35aff7ed7cfbab70ec7d2aaa779d63b4f5369bff31ce1","src/packed/api.rs":"df42e7500c94c9de1ac44145a0dd99ea02047e6bba229da12f2575337beebcf0","src/packed/mod.rs":"ad2f8e18996737347a1181a4457387276d139315bcabfc5e34494af0c0319701","src/packed/pattern.rs":"3abf3835d4c4f8a43753c52936a894d819f713f233fc046e19de5ef95200dcce","src/packed/rabinkarp.rs":"ad7d4533f96aed336e29c5553657ae57b0d733ace9c707a6cf4d08d8fc6edee5","src/packed/teddy/README.md":"b4b83fb5afafbbea6cb76fe70f49cc8ced888f682d98abe5ea5773e95d9ec2b0","src/packed/teddy/compile.rs":"aad40b3f93d2c388b409b31fb2795d414a365237789d5b1a7510d97ceb8ce260","src/packed/teddy/mod.rs":"83b52bd80272970ad17234d0db293d17c1710ec582302bf516b203c8edec037e","src/packed/teddy/runtime.rs":"8
36146e90b320b14fa2c65fe4af7915a41f6fb04408aac5fac731c22ff46adae","src/packed/tests.rs":"b8dc4d3281ecd6d0fa2bf7ef16cf292a467dfdce64e470c7921e983bfa60fee2","src/packed/vector.rs":"ab3c0535fca5f09198d58cbfae44c292aeb3ce44bc92bca36d30dc72963639fc","src/prefilter.rs":"82a3eb6d5c0c3f10bc8d5f57d55d6d14cf4cf21c475bb5253e1921084063b8d7","src/state_id.rs":"519ec8c7bf3fa72103d4c561c193759759f535dca924c9853efe630f406d2029","src/tests.rs":"ee9b85f3c27cb2fba5796e5be8019aafecc13ee9a4f614553f2bc8953f51c6de"},"package":"cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/Cargo.toml 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "aho-corasick" -version = "0.7.20" -authors = ["Andrew Gallant "] -exclude = ["/aho-corasick-debug"] -autotests = false -description = "Fast multiple substring searching." 
-homepage = "https://github.com/BurntSushi/aho-corasick" -readme = "README.md" -keywords = [ - "string", - "search", - "text", - "aho", - "multi", -] -categories = ["text-processing"] -license = "Unlicense OR MIT" -repository = "https://github.com/BurntSushi/aho-corasick" - -[profile.bench] -debug = true - -[profile.release] -debug = true - -[lib] -name = "aho_corasick" - -[dependencies.memchr] -version = "2.4.0" -default-features = false - -[dev-dependencies] - -[features] -default = ["std"] -std = ["memchr/std"] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/COPYING clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/COPYING --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/COPYING 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/COPYING 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -This project is dual-licensed under the Unlicense and MIT licenses. - -You may use this code under the terms of either license. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/DESIGN.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/DESIGN.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/DESIGN.md 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/DESIGN.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,483 +0,0 @@ -This document describes the internal design of this crate, which is an object -lesson in what happens when you take a fairly simple old algorithm like -Aho-Corasick and make it fast and production ready. - -The target audience of this document is Rust programmers that have some -familiarity with string searching, however, one does not need to know the -Aho-Corasick algorithm in order to read this (it is explained below). One -should, however, know what a trie is. (If you don't, go read its Wikipedia -article.) 
- -The center-piece of this crate is an implementation of Aho-Corasick. On its -own, Aho-Corasick isn't that complicated. The complex pieces come from the -different variants of Aho-Corasick implemented in this crate. Specifically, -they are: - -* Aho-Corasick as an NFA, using dense transitions near the root with sparse - transitions elsewhere. -* Aho-Corasick as a DFA. (An NFA is slower to search, but cheaper to construct - and uses less memory.) - * A DFA with pre-multiplied state identifiers. This saves a multiplication - instruction in the core search loop. - * A DFA with equivalence classes of bytes as the alphabet, instead of the - traditional 256-byte alphabet. This shrinks the size of the DFA in memory, - but adds an extra lookup in the core search loop to map the input byte to - an equivalent class. -* The option to choose how state identifiers are represented, via one of - u8, u16, u32, u64 or usize. This permits creating compact automatons when - matching a small number of patterns. -* Supporting "standard" match semantics, along with its overlapping variant, - in addition to leftmost-first and leftmost-longest semantics. The "standard" - semantics are typically what you see in a textbook description of - Aho-Corasick. However, Aho-Corasick is also useful as an optimization in - regex engines, which often use leftmost-first or leftmost-longest semantics. - Thus, it is useful to implement those semantics here. The "standard" and - "leftmost" search algorithms are subtly different, and also require slightly - different construction algorithms. -* Support for ASCII case insensitive matching. -* Support for accelerating searches when the patterns all start with a small - number of fixed bytes. Or alternatively, when the patterns all contain a - small number of rare bytes. (Searching for these bytes uses SIMD vectorized - code courtesy of `memchr`.) 
-* Transparent support for alternative SIMD vectorized search routines for - smaller number of literals, such as the Teddy algorithm. We called these - "packed" search routines because they use SIMD. They can often be an order of - magnitude faster than just Aho-Corasick, but don't scale as well. -* Support for searching streams. This can reuse most of the underlying code, - but does require careful buffering support. -* Support for anchored searches, which permit efficient `is_prefix` checks for - a large number of patterns. - -When you combine all of this together along with trying to make everything as -fast as possible, what you end up with is enitrely too much code with too much -`unsafe`. Alas, I was not smart enough to figure out how to reduce it. Instead, -we will explain it. - - -# Basics - -The fundamental problem this crate is trying to solve is to determine the -occurrences of possibly many patterns in a haystack. The naive way to solve -this is to look for a match for each pattern at each position in the haystack: - - for i in 0..haystack.len(): - for p in patterns.iter(): - if haystack[i..].starts_with(p.bytes()): - return Match(p.id(), i, i + p.bytes().len()) - -Those four lines are effectively all this crate does. The problem with those -four lines is that they are very slow, especially when you're searching for a -large number of patterns. - -While there are many different algorithms available to solve this, a popular -one is Aho-Corasick. It's a common solution because it's not too hard to -implement, scales quite well even when searching for thousands of patterns and -is generally pretty fast. Aho-Corasick does well here because, regardless of -the number of patterns you're searching for, it always visits each byte in the -haystack exactly once. This means, generally speaking, adding more patterns to -an Aho-Corasick automaton does not make it slower. 
(Strictly speaking, however, -this is not true, since a larger automaton will make less effective use of the -CPU's cache.) - -Aho-Corasick can be succinctly described as a trie with state transitions -between some of the nodes that efficiently instruct the search algorithm to -try matching alternative keys in the automaton. The trick is that these state -transitions are arranged such that each byte of input needs to be inspected -only once. These state transitions are typically called "failure transitions," -because they instruct the searcher (the thing traversing the automaton while -reading from the haystack) what to do when a byte in the haystack does not -correspond to a valid transition in the current state of the trie. - -More formally, a failure transition points to a state in the automaton that may -lead to a match whose prefix is a proper suffix of the path traversed through -the trie so far. (If no such proper suffix exists, then the failure transition -points back to the start state of the trie, effectively restarting the search.) -This is perhaps simpler to explain pictorally. For example, let's say we built -an Aho-Corasick automaton with the following patterns: 'abcd' and 'cef'. The -trie looks like this: - - a - S1 - b - S2 - c - S3 - d - S4* - / - S0 - c - S5 - e - S6 - f - S7* - -where states marked with a `*` are match states (meaning, the search algorithm -should stop and report a match to the caller). - -So given this trie, it should be somewhat straight-forward to see how it can -be used to determine whether any particular haystack *starts* with either -`abcd` or `cef`. It's easy to express this in code: - - fn has_prefix(trie: &Trie, haystack: &[u8]) -> bool { - let mut state_id = trie.start(); - // If the empty pattern is in trie, then state_id is a match state. 
- if trie.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - state_id = match trie.next_state(state_id, b) { - Some(id) => id, - // If there was no transition for this state and byte, then we know - // the haystack does not start with one of the patterns in our trie. - None => return false, - }; - if trie.is_match(state_id) { - return true; - } - } - false - } - -And that's pretty much it. All we do is move through the trie starting with the -bytes at the beginning of the haystack. If we find ourselves in a position -where we can't move, or if we've looked through the entire haystack without -seeing a match state, then we know the haystack does not start with any of the -patterns in the trie. - -The meat of the Aho-Corasick algorithm is in how we add failure transitions to -our trie to keep searching efficient. Specifically, it permits us to not only -check whether a haystack *starts* with any one of a number of patterns, but -rather, whether the haystack contains any of a number of patterns *anywhere* in -the haystack. - -As mentioned before, failure transitions connect a proper suffix of the path -traversed through the trie before, with a path that leads to a match that has a -prefix corresponding to that proper suffix. So in our case, for patterns `abcd` -and `cef`, with a haystack `abcef`, we want to transition to state `S5` (from -the diagram above) from `S3` upon seeing that the byte following `c` is not -`d`. Namely, the proper suffix in this example is `c`, which is a prefix of -`cef`. So the modified diagram looks like this: - - - a - S1 - b - S2 - c - S3 - d - S4* - / / - / ---------------- - / / - S0 - c - S5 - e - S6 - f - S7* - -One thing that isn't shown in this diagram is that *all* states have a failure -transition, but only `S3` has a *non-trivial* failure transition. That is, all -other states have a failure transition back to the start state. 
So if our -haystack was `abzabcd`, then the searcher would transition back to `S0` after -seeing `z`, which effectively restarts the search. (Because there is no pattern -in our trie that has a prefix of `bz` or `z`.) - -The code for traversing this *automaton* or *finite state machine* (it is no -longer just a trie) is not that much different from the `has_prefix` code -above: - - fn contains(fsm: &FiniteStateMachine, haystack: &[u8]) -> bool { - let mut state_id = fsm.start(); - // If the empty pattern is in fsm, then state_id is a match state. - if fsm.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - // While the diagram above doesn't show this, we may wind up needing - // to follow multiple failure transitions before we land on a state - // in which we can advance. Therefore, when searching for the next - // state, we need to loop until we don't see a failure transition. - // - // This loop terminates because the start state has no empty - // transitions. Every transition from the start state either points to - // another state, or loops back to the start state. - loop { - match fsm.next_state(state_id, b) { - Some(id) => { - state_id = id; - break; - } - // Unlike our code above, if there was no transition for this - // state, then we don't quit. Instead, we look for this state's - // failure transition and follow that instead. - None => { - state_id = fsm.next_fail_state(state_id); - } - }; - } - if fsm.is_match(state_id) { - return true; - } - } - false - } - -Other than the complication around traversing failure transitions, this code -is still roughly "traverse the automaton with bytes from the haystack, and quit -when a match is seen." - -And that concludes our section on the basics. While we didn't go deep into -how the automaton is built (see `src/nfa.rs`, which has detailed comments about -that), the basic structure of Aho-Corasick should be reasonably clear. 
- - -# NFAs and DFAs - -There are generally two types of finite automata: non-deterministic finite -automata (NFA) and deterministic finite automata (DFA). The difference between -them is, principally, that an NFA can be in multiple states at once. This is -typically accomplished by things called _epsilon_ transitions, where one could -move to a new state without consuming any bytes from the input. (The other -mechanism by which NFAs can be in more than one state is where the same byte in -a particular state transitions to multiple distinct states.) In contrast, a DFA -can only ever be in one state at a time. A DFA has no epsilon transitions, and -for any given state, a byte transitions to at most one other state. - -By this formulation, the Aho-Corasick automaton described in the previous -section is an NFA. This is because failure transitions are, effectively, -epsilon transitions. That is, whenever the automaton is in state `S`, it is -actually in the set of states that are reachable by recursively following -failure transitions from `S`. (This means that, for example, the start state -is always active since the start state is reachable via failure transitions -from any state in the automaton.) - -NFAs have a lot of nice properties. They tend to be easier to construct, and -also tend to use less memory. However, their primary downside is that they are -typically slower to execute. For example, the code above showing how to search -with an Aho-Corasick automaton needs to potentially iterate through many -failure transitions for every byte of input. While this is a fairly small -amount of overhead, this can add up, especially if the automaton has a lot of -overlapping patterns with a lot of failure transitions. - -A DFA's search code, by contrast, looks like this: - - fn contains(dfa: &DFA, haystack: &[u8]) -> bool { - let mut state_id = dfa.start(); - // If the empty pattern is in dfa, then state_id is a match state. 
- if dfa.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - // An Aho-Corasick DFA *never* has a missing state that requires - // failure transitions to be followed. One byte of input advances the - // automaton by one state. Always. - state_id = dfa.next_state(state_id, b); - if dfa.is_match(state_id) { - return true; - } - } - false - } - -The search logic here is much simpler than for the NFA, and this tends to -translate into significant performance benefits as well, since there's a lot -less work being done for each byte in the haystack. How is this accomplished? -It's done by pre-following all failure transitions for all states for all bytes -in the alphabet, and then building a single state transition table. Building -this DFA can be much more costly than building the NFA, and use much more -memory, but the better performance can be worth it. - -Users of this crate can actually choose between using an NFA or a DFA. By -default, an NFA is used, because it typically strikes the best balance between -space usage and search performance. But the DFA option is available for cases -where a little extra memory and upfront time building the automaton is okay. -For example, the `AhoCorasick::auto_configure` and -`AhoCorasickBuilder::auto_configure` methods will enable the DFA setting if -there are a small number of patterns. - - -# More DFA tricks - -As described in the previous section, one of the downsides of using a DFA -is that it uses more memory and can take longer to build. One small way of -mitigating these concerns is to map the alphabet used by the automaton into -a smaller space. Typically, the alphabet of a DFA has 256 elements in it: -one element for each possible value that fits into a byte. However, in many -cases, one does not need the full alphabet. For example, if all patterns in an -Aho-Corasick automaton are ASCII letters, then this only uses up 52 distinct -bytes. 
As far as the automaton is concerned, the rest of the 204 bytes are -indistinguishable from one another: they will never disrciminate between a -match or a non-match. Therefore, in cases like that, the alphabet can be shrunk -to just 53 elements. One for each ASCII letter, and then another to serve as a -placeholder for every other unused byte. - -In practice, this library doesn't quite compute the optimal set of equivalence -classes, but it's close enough in most cases. The key idea is that this then -allows the transition table for the DFA to be potentially much smaller. The -downside of doing this, however, is that since the transition table is defined -in terms of this smaller alphabet space, every byte in the haystack must be -re-mapped to this smaller space. This requires an additional 256-byte table. -In practice, this can lead to a small search time hit, but it can be difficult -to measure. Moreover, it can sometimes lead to faster search times for bigger -automata, since it could be difference between more parts of the automaton -staying in the CPU cache or not. - -One other trick for DFAs employed by this crate is the notion of premultiplying -state identifiers. Specifically, the normal way to compute the next transition -in a DFA is via the following (assuming that the transition table is laid out -sequentially in memory, in row-major order, where the rows are states): - - next_state_id = dfa.transitions[current_state_id * 256 + current_byte] - -However, since the value `256` is a fixed constant, we can actually premultiply -the state identifiers in the table when we build the table initially. Then, the -next transition computation simply becomes: - - next_state_id = dfa.transitions[current_state_id + current_byte] - -This doesn't seem like much, but when this is being executed for every byte of -input that you're searching, saving that extra multiplication instruction can -add up. 
- -The same optimization works even when equivalence classes are enabled, as -described above. The only difference is that the premultiplication is by the -total number of equivalence classes instead of 256. - -There isn't much downside to premultiplying state identifiers, other than the -fact that you may need to choose a bigger integer representation than you would -otherwise. For example, if you don't premultiply state identifiers, then an -automaton that uses `u8` as a state identifier can hold up to 256 states. -However, if they are premultiplied, then it can only hold up to -`floor(256 / len(alphabet))` states. Thus premultiplication impacts how compact -your DFA can be. In practice, it's pretty rare to use `u8` as a state -identifier, so premultiplication is usually a good thing to do. - -Both equivalence classes and premultiplication are tuneable parameters via the -`AhoCorasickBuilder` type, and both are enabled by default. - - -# Match semantics - -One of the more interesting things about this implementation of Aho-Corasick -that (as far as this author knows) separates it from other implementations, is -that it natively supports leftmost-first and leftmost-longest match semantics. -Briefly, match semantics refer to the decision procedure by which searching -will disambiguate matches when there are multiple to choose from: - -* **standard** match semantics emits matches as soon as they are detected by - the automaton. This is typically equivalent to the textbook non-overlapping - formulation of Aho-Corasick. -* **leftmost-first** match semantics means that 1) the next match is the match - starting at the leftmost position and 2) among multiple matches starting at - the same leftmost position, the match corresponding to the pattern provided - first by the caller is reported. 
-* **leftmost-longest** is like leftmost-first, except when there are multiple - matches starting at the same leftmost position, the pattern corresponding to - the longest match is returned. - -(The crate API documentation discusses these differences, with examples, in -more depth on the `MatchKind` type.) - -The reason why supporting these match semantics is important is because it -gives the user more control over the match procedure. For example, -leftmost-first permits users to implement match priority by simply putting the -higher priority patterns first. Leftmost-longest, on the other hand, permits -finding the longest possible match, which might be useful when trying to find -words matching a dictionary. Additionally, regex engines often want to use -Aho-Corasick as an optimization when searching for an alternation of literals. -In order to preserve correct match semantics, regex engines typically can't use -the standard textbook definition directly, since regex engines will implement -either leftmost-first (Perl-like) or leftmost-longest (POSIX) match semantics. - -Supporting leftmost semantics requires a couple key changes: - -* Constructing the Aho-Corasick automaton changes a bit in both how the trie is - constructed and how failure transitions are found. Namely, only a subset of - the failure transitions are added. Specifically, only the failure transitions - that either do not occur after a match or do occur after a match but preserve - that match are kept. (More details on this can be found in `src/nfa.rs`.) -* The search algorithm changes slightly. Since we are looking for the leftmost - match, we cannot quit as soon as a match is detected. Instead, after a match - is detected, we must keep searching until either the end of the input or - until a dead state is seen. (Dead states are not used for standard match - semantics. Dead states mean that searching should stop after a match has been - found.) 
- -Other implementations of Aho-Corasick do support leftmost match semantics, but -they do it with more overhead at search time, or even worse, with a queue of -matches and sophisticated hijinks to disambiguate the matches. While our -construction algorithm becomes a bit more complicated, the correct match -semantics fall out from the structure of the automaton itself. - - -# Overlapping matches - -One of the nice properties of an Aho-Corasick automaton is that it can report -all possible matches, even when they overlap with one another. In this mode, -the match semantics don't matter, since all possible matches are reported. -Overlapping searches work just like regular searches, except the state -identifier at which the previous search left off is carried over to the next -search, so that it can pick up where it left off. If there are additional -matches at that state, then they are reported before resuming the search. - -Enabling leftmost-first or leftmost-longest match semantics causes the -automaton to use a subset of all failure transitions, which means that -overlapping searches cannot be used. Therefore, if leftmost match semantics are -used, attempting to do an overlapping search will panic. Thus, to get -overlapping searches, the caller must use the default standard match semantics. -This behavior was chosen because there are only two alternatives, which were -deemed worse: - -* Compile two automatons internally, one for standard semantics and one for - the semantics requested by the caller (if not standard). -* Create a new type, distinct from the `AhoCorasick` type, which has different - capabilities based on the configuration options. - -The first is untenable because of the amount of memory used by the automaton. -The second increases the complexity of the API too much by adding too many -types that do similar things. It is conceptually much simpler to keep all -searching isolated to a single type. 
Callers may query whether the automaton -supports overlapping searches via the `AhoCorasick::supports_overlapping` -method. - - -# Stream searching - -Since Aho-Corasick is an automaton, it is possible to do partial searches on -partial parts of the haystack, and then resume that search on subsequent pieces -of the haystack. This is useful when the haystack you're trying to search is -not stored contiguously in memory, or if one does not want to read the entire -haystack into memory at once. - -Currently, only standard semantics are supported for stream searching. This is -some of the more complicated code in this crate, and is something I would very -much like to improve. In particular, it currently has the restriction that it -must buffer at least enough of the haystack in memory in order to fit the -longest possible match. The difficulty in getting stream searching right is -that the implementation choices (such as the buffer size) often impact what the -API looks like and what it's allowed to do. - - -# Prefilters - -In some cases, Aho-Corasick is not the fastest way to find matches containing -multiple patterns. Sometimes, the search can be accelerated using highly -optimized SIMD routines. For example, consider searching the following -patterns: - - Sherlock - Moriarty - Watson - -It is plausible that it would be much faster to quickly look for occurrences of -the leading bytes, `S`, `M` or `W`, before trying to start searching via the -automaton. Indeed, this is exactly what this crate will do. - -When there are more than three distinct starting bytes, then this crate will -look for three distinct bytes occurring at any position in the patterns, while -preferring bytes that are heuristically determined to be rare over others. For -example: - - Abuzz - Sanchez - Vasquez - Topaz - Waltz - -Here, we have more than 3 distinct starting bytes, but all of the patterns -contain `z`, which is typically a rare byte. 
In this case, the prefilter will -scan for `z`, back up a bit, and then execute the Aho-Corasick automaton. - -If all of that fails, then a packed multiple substring algorithm will be -attempted. Currently, the only algorithm available for this is Teddy, but more -may be added in the future. Teddy is unlike the above prefilters in that it -confirms its own matches, so when Teddy is active, it might not be necessary -for Aho-Corasick to run at all. (See `Automaton::leftmost_find_at_no_state_imp` -in `src/automaton.rs`.) However, the current Teddy implementation only works -in `x86_64` and when SSSE3 or AVX2 are available, and moreover, only works -_well_ when there are a small number of patterns (say, less than 100). Teddy -also requires the haystack to be of a certain length (more than 16-34 bytes). -When the haystack is shorter than that, Rabin-Karp is used instead. (See -`src/packed/rabinkarp.rs`.) - -There is a more thorough description of Teddy at -[`src/packed/teddy/README.md`](src/packed/teddy/README.md). 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/LICENSE-MIT clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/LICENSE-MIT --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/LICENSE-MIT 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Gallant - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/README.md 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -aho-corasick -============ -A library for finding occurrences of many patterns at once with SIMD -acceleration in some cases. This library provides multiple pattern -search principally through an implementation of the -[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), -which builds a finite state machine for executing searches in linear time. -Features include case insensitive matching, overlapping matches, fast searching -via SIMD and optional full DFA construction and search & replace in streams. - -[![Build status](https://github.com/BurntSushi/aho-corasick/workflows/ci/badge.svg)](https://github.com/BurntSushi/aho-corasick/actions) -[![crates.io](https://img.shields.io/crates/v/aho-corasick.svg)](https://crates.io/crates/aho-corasick) - -Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). - - -### Documentation - -https://docs.rs/aho-corasick - - -### Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -aho-corasick = "0.7" -``` - - -### Example: basic searching - -This example shows how to search for occurrences of multiple patterns -simultaneously. Each match includes the pattern that matched along with the -byte offsets of the match. 
- -```rust -use aho_corasick::AhoCorasick; - -let patterns = &["apple", "maple", "Snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::new(patterns); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (1, 13, 18), - (0, 28, 33), - (2, 43, 50), -]); -``` - - -### Example: case insensitivity - -This is like the previous example, but matches `Snapple` case insensitively -using `AhoCorasickBuilder`: - -```rust -use aho_corasick::AhoCorasickBuilder; - -let patterns = &["apple", "maple", "snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasickBuilder::new() - .ascii_case_insensitive(true) - .build(patterns); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (1, 13, 18), - (0, 28, 33), - (2, 43, 50), -]); -``` - - -### Example: replacing matches in a stream - -This example shows how to execute a search and replace on a stream without -loading the entire stream into memory first. - -```rust -use aho_corasick::AhoCorasick; - -let patterns = &["fox", "brown", "quick"]; -let replace_with = &["sloth", "grey", "slow"]; - -// In a real example, these might be `std::fs::File`s instead. All you need to -// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. -let rdr = "The quick brown fox."; -let mut wtr = vec![]; - -let ac = AhoCorasick::new(patterns); -ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with) - .expect("stream_replace_all failed"); -assert_eq!(b"The slow grey sloth.".to_vec(), wtr); -``` - - -### Example: finding the leftmost first match - -In the textbook description of Aho-Corasick, its formulation is typically -structured such that it reports all possible matches, even when they overlap -with another. 
In many cases, overlapping matches may not be desired, such as -the case of finding all successive non-overlapping matches like you might with -a standard regular expression. - -Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do -this doesn't always work in the expected way, since it will report matches as -soon as they are seen. For example, consider matching the regex `Samwise|Sam` -against the text `Samwise`. Most regex engines (that are Perl-like, or -non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick -algorithm modified for reporting non-overlapping matches will report `Sam`. - -A novel contribution of this library is the ability to change the match -semantics of Aho-Corasick (without additional search time overhead) such that -`Samwise` is reported instead. For example, here's the standard approach: - -```rust -use aho_corasick::AhoCorasick; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::new(patterns); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Sam", &haystack[mat.start()..mat.end()]); -``` - -And now here's the leftmost-first version, which matches how a Perl-like -regex will work: - -```rust -use aho_corasick::{AhoCorasickBuilder, MatchKind}; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostFirst) - .build(patterns); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); -``` - -In addition to leftmost-first semantics, this library also supports -leftmost-longest semantics, which match the POSIX behavior of a regular -expression alternation. See `MatchKind` in the docs for more details. - - -### Minimum Rust version policy - -This crate's minimum supported `rustc` version is `1.41.1`. 
- -The current policy is that the minimum Rust version required to use this crate -can be increased in minor version updates. For example, if `crate 1.0` requires -Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust -1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum -version of Rust. - -In general, this crate will be conservative with respect to the minimum -supported version of Rust. - - -### FFI bindings - -* [G-Research/ahocorasick_rs](https://github.com/G-Research/ahocorasick_rs/) -is a Python wrapper for this library. - - -### Future work - -Here are some plans for the future: - -* Assuming the current API is sufficient, I'd like to commit to it and release - a `1.0` version of this crate some time in the next 6-12 months. -* Support stream searching with leftmost match semantics. Currently, only - standard match semantics are supported. Getting this right seems possible, - but is tricky since the match state needs to be propagated through multiple - searches. (With standard semantics, as soon as a match is seen the search - ends.) 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/rustfmt.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/rustfmt.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/rustfmt.toml 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/rustfmt.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -max_width = 79 -use_small_heuristics = "max" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/ahocorasick.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/ahocorasick.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/ahocorasick.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/ahocorasick.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2141 +0,0 @@ -use std::io; - -use crate::automaton::Automaton; -use crate::buffer::Buffer; -use crate::dfa::{self, DFA}; -use crate::error::Result; -use crate::nfa::{self, NFA}; -use crate::packed; -use crate::prefilter::{Prefilter, PrefilterState}; -use crate::state_id::StateID; -use crate::Match; - -/// An automaton for searching multiple strings in linear time. -/// -/// The `AhoCorasick` type supports a few basic ways of constructing an -/// automaton, including -/// [`AhoCorasick::new`](struct.AhoCorasick.html#method.new) -/// and -/// [`AhoCorasick::new_auto_configured`](struct.AhoCorasick.html#method.new_auto_configured). -/// However, there are a fair number of configurable options that can be set -/// by using -/// [`AhoCorasickBuilder`](struct.AhoCorasickBuilder.html) -/// instead. Such options include, but are not limited to, how matches are -/// determined, simple case insensitivity, whether to use a DFA or not and -/// various knobs for controlling the space-vs-time trade offs taken when -/// building the automaton. 
-/// -/// If you aren't sure where to start, try beginning with -/// [`AhoCorasick::new_auto_configured`](struct.AhoCorasick.html#method.new_auto_configured). -/// -/// # Resource usage -/// -/// Aho-Corasick automatons are always constructed in `O(p)` time, where `p` -/// is the combined length of all patterns being searched. With that said, -/// building an automaton can be fairly costly because of high constant -/// factors, particularly when enabling the -/// [DFA](struct.AhoCorasickBuilder.html#method.dfa) -/// option (which is disabled by default). For this reason, it's generally a -/// good idea to build an automaton once and reuse it as much as possible. -/// -/// Aho-Corasick automatons can also use a fair bit of memory. To get a -/// concrete idea of how much memory is being used, try using the -/// [`AhoCorasick::heap_bytes`](struct.AhoCorasick.html#method.heap_bytes) -/// method. -/// -/// # Examples -/// -/// This example shows how to search for occurrences of multiple patterns -/// simultaneously in a case insensitive fashion. Each match includes the -/// pattern that matched along with the byte offsets of the match. 
-/// -/// ``` -/// use aho_corasick::AhoCorasickBuilder; -/// -/// let patterns = &["apple", "maple", "snapple"]; -/// let haystack = "Nobody likes maple in their apple flavored Snapple."; -/// -/// let ac = AhoCorasickBuilder::new() -/// .ascii_case_insensitive(true) -/// .build(patterns); -/// let mut matches = vec![]; -/// for mat in ac.find_iter(haystack) { -/// matches.push((mat.pattern(), mat.start(), mat.end())); -/// } -/// assert_eq!(matches, vec![ -/// (1, 13, 18), -/// (0, 28, 33), -/// (2, 43, 50), -/// ]); -/// ``` -/// -/// This example shows how to replace matches with some other string: -/// -/// ``` -/// use aho_corasick::AhoCorasick; -/// -/// let patterns = &["fox", "brown", "quick"]; -/// let haystack = "The quick brown fox."; -/// let replace_with = &["sloth", "grey", "slow"]; -/// -/// let ac = AhoCorasick::new(patterns); -/// let result = ac.replace_all(haystack, replace_with); -/// assert_eq!(result, "The slow grey sloth."); -/// ``` -#[derive(Clone, Debug)] -pub struct AhoCorasick { - imp: Imp, - match_kind: MatchKind, -} - -impl AhoCorasick { - /// Create a new Aho-Corasick automaton using the default configuration. - /// - /// The default configuration optimizes for less space usage, but at the - /// expense of longer search times. To change the configuration, use - /// [`AhoCorasickBuilder`](struct.AhoCorasickBuilder.html) - /// for fine-grained control, or - /// [`AhoCorasick::new_auto_configured`](struct.AhoCorasick.html#method.new_auto_configured) - /// for automatic configuration if you aren't sure which settings to pick. - /// - /// This uses the default - /// [`MatchKind::Standard`](enum.MatchKind.html#variant.Standard) - /// match semantics, which reports a match as soon as it is found. This - /// corresponds to the standard match semantics supported by textbook - /// descriptions of the Aho-Corasick algorithm. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "baz", - /// ]); - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// ``` - pub fn new(patterns: I) -> AhoCorasick - where - I: IntoIterator, - P: AsRef<[u8]>, - { - AhoCorasickBuilder::new().build(patterns) - } - - /// Build an Aho-Corasick automaton with an automatically determined - /// configuration. - /// - /// Specifically, this requires a slice of patterns instead of an iterator - /// since the configuration is determined by looking at the patterns before - /// constructing the automaton. The idea here is to balance space and time - /// automatically. That is, when searching a small number of patterns, this - /// will attempt to use the fastest possible configuration since the total - /// space required will be small anyway. As the number of patterns grows, - /// this will fall back to slower configurations that use less space. - /// - /// If you want auto configuration but with match semantics different from - /// the default `MatchKind::Standard`, then use - /// [`AhoCorasickBuilder::auto_configure`](struct.AhoCorasickBuilder.html#method.auto_configure). - /// - /// # Examples - /// - /// Basic usage is just like `new`, except you must provide a slice: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new_auto_configured(&[ - /// "foo", "bar", "baz", - /// ]); - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// ``` - pub fn new_auto_configured(patterns: &[B]) -> AhoCorasick - where - B: AsRef<[u8]>, - { - AhoCorasickBuilder::new().auto_configure(patterns).build(patterns) - } -} - -impl AhoCorasick { - /// Returns true if and only if this automaton matches the haystack at any - /// position. - /// - /// `haystack` may be any type that is cheaply convertible to a `&[u8]`. 
- /// This includes, but is not limited to, `String`, `&str`, `Vec`, and - /// `&[u8]` itself. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "quux", "baz", - /// ]); - /// assert!(ac.is_match("xxx bar xxx")); - /// assert!(!ac.is_match("xxx qux xxx")); - /// ``` - pub fn is_match>(&self, haystack: B) -> bool { - self.earliest_find(haystack).is_some() - } - - /// Returns the location of the first detected match in `haystack`. - /// - /// This method has the same behavior regardless of the - /// [`MatchKind`](enum.MatchKind.html) - /// of this automaton. - /// - /// `haystack` may be any type that is cheaply convertible to a `&[u8]`. - /// This includes, but is not limited to, `String`, `&str`, `Vec`, and - /// `&[u8]` itself. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "abc", "b", - /// ]); - /// let mat = ac.earliest_find("abcd").expect("should have match"); - /// assert_eq!(1, mat.pattern()); - /// assert_eq!((1, 2), (mat.start(), mat.end())); - /// ``` - pub fn earliest_find>(&self, haystack: B) -> Option { - let mut prestate = PrefilterState::new(self.max_pattern_len()); - let mut start = self.imp.start_state(); - self.imp.earliest_find_at( - &mut prestate, - haystack.as_ref(), - 0, - &mut start, - ) - } - - /// Returns the location of the first match according to the match - /// semantics that this automaton was constructed with. - /// - /// When using `MatchKind::Standard`, this corresponds precisely to the - /// same behavior as - /// [`earliest_find`](struct.AhoCorasick.html#method.earliest_find). - /// Otherwise, match semantics correspond to either - /// [leftmost-first](enum.MatchKind.html#variant.LeftmostFirst) - /// or - /// [leftmost-longest](enum.MatchKind.html#variant.LeftmostLongest). 
- /// - /// `haystack` may be any type that is cheaply convertible to a `&[u8]`. - /// This includes, but is not limited to, `String`, `&str`, `Vec`, and - /// `&[u8]` itself. - /// - /// # Examples - /// - /// Basic usage, with standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("b", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Now with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// And finally, leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abcd", &haystack[mat.start()..mat.end()]); - /// ``` - pub fn find>(&self, haystack: B) -> Option { - let mut prestate = PrefilterState::new(self.max_pattern_len()); - self.imp.find_at_no_state(&mut prestate, haystack.as_ref(), 0) - } - - /// Returns an iterator of non-overlapping matches, using the match - /// semantics that this automaton was constructed with. - /// - /// `haystack` may be any type that is cheaply convertible to a `&[u8]`. 
- /// This includes, but is not limited to, `String`, `&str`, `Vec`, and - /// `&[u8]` itself. - /// - /// # Examples - /// - /// Basic usage, with standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns); - /// let matches: Vec = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![2, 2, 2], matches); - /// ``` - /// - /// Now with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let matches: Vec = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![0, 2, 0], matches); - /// ``` - /// - /// And finally, leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns); - /// let matches: Vec = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![0, 2, 1], matches); - /// ``` - pub fn find_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>( - &'a self, - haystack: &'b B, - ) -> FindIter<'a, 'b, S> { - FindIter::new(self, haystack.as_ref()) - } - - /// Returns an iterator of overlapping matches in the given `haystack`. 
- /// - /// Overlapping matches can _only_ be detected using - /// `MatchKind::Standard` semantics. If this automaton was constructed with - /// leftmost semantics, then this method will panic. To determine whether - /// this will panic at runtime, use the - /// [`AhoCorasick::supports_overlapping`](struct.AhoCorasick.html#method.supports_overlapping) - /// method. - /// - /// `haystack` may be any type that is cheaply convertible to a `&[u8]`. - /// This includes, but is not limited to, `String`, `&str`, `Vec`, and - /// `&[u8]` itself. - /// - /// # Panics - /// - /// This panics when `AhoCorasick::supports_overlapping` returns `false`. - /// That is, this panics when this automaton's match semantics are not - /// `MatchKind::Standard`. - /// - /// # Examples - /// - /// Basic usage, with standard semantics: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns); - /// let matches: Vec = ac - /// .find_overlapping_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![2, 0, 2, 2, 0, 1], matches); - /// ``` - pub fn find_overlapping_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>( - &'a self, - haystack: &'b B, - ) -> FindOverlappingIter<'a, 'b, S> { - FindOverlappingIter::new(self, haystack.as_ref()) - } - - /// Replace all matches with a corresponding value in the `replace_with` - /// slice given. Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// # Panics - /// - /// This panics when `replace_with.len()` does not equal the total number - /// of patterns that are matched by this automaton. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let result = ac.replace_all(haystack, &["x", "y", "z"]); - /// assert_eq!("x the z to the xage", result); - /// ``` - pub fn replace_all(&self, haystack: &str, replace_with: &[B]) -> String - where - B: AsRef, - { - assert_eq!( - replace_with.len(), - self.pattern_count(), - "replace_all requires a replacement for every pattern \ - in the automaton" - ); - let mut dst = String::with_capacity(haystack.len()); - self.replace_all_with(haystack, &mut dst, |mat, _, dst| { - dst.push_str(replace_with[mat.pattern()].as_ref()); - true - }); - dst - } - - /// Replace all matches using raw bytes with a corresponding value in the - /// `replace_with` slice given. Matches correspond to the same matches as - /// reported by [`find_iter`](struct.AhoCorasick.html#method.find_iter). - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// # Panics - /// - /// This panics when `replace_with.len()` does not equal the total number - /// of patterns that are matched by this automaton. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = b"append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let result = ac.replace_all_bytes(haystack, &["x", "y", "z"]); - /// assert_eq!(b"x the z to the xage".to_vec(), result); - /// ``` - pub fn replace_all_bytes( - &self, - haystack: &[u8], - replace_with: &[B], - ) -> Vec - where - B: AsRef<[u8]>, - { - assert_eq!( - replace_with.len(), - self.pattern_count(), - "replace_all_bytes requires a replacement for every pattern \ - in the automaton" - ); - let mut dst = Vec::with_capacity(haystack.len()); - self.replace_all_with_bytes(haystack, &mut dst, |mat, _, dst| { - dst.extend(replace_with[mat.pattern()].as_ref()); - true - }); - dst - } - - /// Replace all matches using a closure called on each match. - /// Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a string buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns `false`, then searching is stopped. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let mut result = String::new(); - /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().to_string()); - /// true - /// }); - /// assert_eq!("0 the 2 to the 0age", result); - /// ``` - /// - /// Stopping the replacement by returning `false` (continued from the - /// example above): - /// - /// ``` - /// # use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// # let patterns = &["append", "appendage", "app"]; - /// # let haystack = "append the app to the appendage"; - /// # let ac = AhoCorasickBuilder::new() - /// # .match_kind(MatchKind::LeftmostFirst) - /// # .build(patterns); - /// let mut result = String::new(); - /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().to_string()); - /// mat.pattern() != 2 - /// }); - /// assert_eq!("0 the 2 to the appendage", result); - /// ``` - pub fn replace_all_with( - &self, - haystack: &str, - dst: &mut String, - mut replace_with: F, - ) where - F: FnMut(&Match, &str, &mut String) -> bool, - { - let mut last_match = 0; - for mat in self.find_iter(haystack) { - dst.push_str(&haystack[last_match..mat.start()]); - last_match = mat.end(); - if !replace_with(&mat, &haystack[mat.start()..mat.end()], dst) { - break; - }; - } - dst.push_str(&haystack[last_match..]); - } - - /// Replace all matches using raw bytes with a closure called on each - /// match. Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). 
- /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a byte buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns `false`, then searching is stopped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = b"append the app to the appendage"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let mut result = vec![]; - /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { - /// dst.extend(mat.pattern().to_string().bytes()); - /// true - /// }); - /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result); - /// ``` - /// - /// Stopping the replacement by returning `false` (continued from the - /// example above): - /// - /// ``` - /// # use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// # let patterns = &["append", "appendage", "app"]; - /// # let haystack = b"append the app to the appendage"; - /// # let ac = AhoCorasickBuilder::new() - /// # .match_kind(MatchKind::LeftmostFirst) - /// # .build(patterns); - /// let mut result = vec![]; - /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { - /// dst.extend(mat.pattern().to_string().bytes()); - /// mat.pattern() != 2 - /// }); - /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result); - /// ``` - pub fn replace_all_with_bytes( - &self, - haystack: &[u8], - dst: &mut Vec, - mut replace_with: F, - ) where - F: FnMut(&Match, &[u8], &mut Vec) -> bool, - { - let mut last_match = 0; - for mat in self.find_iter(haystack) { - dst.extend(&haystack[last_match..mat.start()]); - last_match = mat.end(); - if !replace_with(&mat, &haystack[mat.start()..mat.end()], dst) { - break; - }; - } - dst.extend(&haystack[last_match..]); - } - 
- /// Returns an iterator of non-overlapping matches in the given - /// stream. Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). - /// - /// The matches yielded by this iterator use absolute position offsets in - /// the stream given, where the first byte has index `0`. Matches are - /// yieled until the stream is exhausted. - /// - /// Each item yielded by the iterator is an `io::Result`, where an - /// error is yielded if there was a problem reading from the reader given. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. - /// - /// Searching a stream requires that the automaton was built with - /// `MatchKind::Standard` semantics. If this automaton was constructed - /// with leftmost semantics, then this method will panic. To determine - /// whether this will panic at runtime, use the - /// [`AhoCorasick::supports_stream`](struct.AhoCorasick.html#method.supports_stream) - /// method. - /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Panics - /// - /// This panics when `AhoCorasick::supports_stream` returns `false`. - /// That is, this panics when this automaton's match semantics are not - /// `MatchKind::Standard`. This restriction may be lifted in the future. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// # fn example() -> Result<(), ::std::io::Error> { - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns); - /// let mut matches = vec![]; - /// for result in ac.stream_find_iter(haystack.as_bytes()) { - /// let mat = result?; - /// matches.push(mat.pattern()); - /// } - /// assert_eq!(vec![2, 2, 2], matches); - /// # Ok(()) }; example().unwrap() - /// ``` - pub fn stream_find_iter<'a, R: io::Read>( - &'a self, - rdr: R, - ) -> StreamFindIter<'a, R, S> { - StreamFindIter::new(self, rdr) - } - - /// Search for and replace all matches of this automaton in - /// the given reader, and write the replacements to the given - /// writer. Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// After all matches are replaced, the writer is _not_ flushed. - /// - /// If there was a problem reading from the given reader or writing to the - /// given writer, then the corresponding `io::Error` is returned and all - /// replacement is stopped. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. However, - /// callers may want to provide a buffered writer. - /// - /// Searching a stream requires that the automaton was built with - /// `MatchKind::Standard` semantics. If this automaton was constructed - /// with leftmost semantics, then this method will panic. To determine - /// whether this will panic at runtime, use the - /// [`AhoCorasick::supports_stream`](struct.AhoCorasick.html#method.supports_stream) - /// method. 
- /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Panics - /// - /// This panics when `AhoCorasick::supports_stream` returns `false`. - /// That is, this panics when this automaton's match semantics are not - /// `MatchKind::Standard`. This restriction may be lifted in the future. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// # fn example() -> Result<(), ::std::io::Error> { - /// let patterns = &["fox", "brown", "quick"]; - /// let haystack = "The quick brown fox."; - /// let replace_with = &["sloth", "grey", "slow"]; - /// - /// let ac = AhoCorasick::new(patterns); - /// let mut result = vec![]; - /// ac.stream_replace_all(haystack.as_bytes(), &mut result, replace_with)?; - /// assert_eq!(b"The slow grey sloth.".to_vec(), result); - /// # Ok(()) }; example().unwrap() - /// ``` - pub fn stream_replace_all( - &self, - rdr: R, - wtr: W, - replace_with: &[B], - ) -> io::Result<()> - where - R: io::Read, - W: io::Write, - B: AsRef<[u8]>, - { - assert_eq!( - replace_with.len(), - self.pattern_count(), - "stream_replace_all requires a replacement for every pattern \ - in the automaton" - ); - self.stream_replace_all_with(rdr, wtr, |mat, _, wtr| { - wtr.write_all(replace_with[mat.pattern()].as_ref()) - }) - } - - /// Search the given reader and replace all matches of this automaton - /// using the given closure. The result is written to the given - /// writer. Matches correspond to the same matches as reported by - /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). 
- /// - /// The closure accepts three parameters: the match found, the text of - /// the match and the writer with which to write the replaced text (if any). - /// - /// After all matches are replaced, the writer is _not_ flushed. - /// - /// If there was a problem reading from the given reader or writing to the - /// given writer, then the corresponding `io::Error` is returned and all - /// replacement is stopped. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. However, - /// callers may want to provide a buffered writer. - /// - /// Searching a stream requires that the automaton was built with - /// `MatchKind::Standard` semantics. If this automaton was constructed - /// with leftmost semantics, then this method will panic. To determine - /// whether this will panic at runtime, use the - /// [`AhoCorasick::supports_stream`](struct.AhoCorasick.html#method.supports_stream) - /// method. - /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Panics - /// - /// This panics when `AhoCorasick::supports_stream` returns `false`. - /// That is, this panics when this automaton's match semantics are not - /// `MatchKind::Standard`. This restriction may be lifted in the future. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::io::Write; - /// use aho_corasick::AhoCorasick; - /// - /// # fn example() -> Result<(), ::std::io::Error> { - /// let patterns = &["fox", "brown", "quick"]; - /// let haystack = "The quick brown fox."; - /// - /// let ac = AhoCorasick::new(patterns); - /// let mut result = vec![]; - /// ac.stream_replace_all_with( - /// haystack.as_bytes(), - /// &mut result, - /// |mat, _, wtr| { - /// wtr.write_all(mat.pattern().to_string().as_bytes()) - /// }, - /// )?; - /// assert_eq!(b"The 2 1 0.".to_vec(), result); - /// # Ok(()) }; example().unwrap() - /// ``` - pub fn stream_replace_all_with( - &self, - rdr: R, - mut wtr: W, - mut replace_with: F, - ) -> io::Result<()> - where - R: io::Read, - W: io::Write, - F: FnMut(&Match, &[u8], &mut W) -> io::Result<()>, - { - let mut it = StreamChunkIter::new(self, rdr); - while let Some(result) = it.next() { - let chunk = result?; - match chunk { - StreamChunk::NonMatch { bytes, .. } => { - wtr.write_all(bytes)?; - } - StreamChunk::Match { bytes, mat } => { - replace_with(&mat, bytes, &mut wtr)?; - } - } - } - Ok(()) - } - - /// Returns the match kind used by this automaton. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "quux", "baz", - /// ]); - /// assert_eq!(&MatchKind::Standard, ac.match_kind()); - /// ``` - pub fn match_kind(&self) -> &MatchKind { - self.imp.match_kind() - } - - /// Returns the length of the longest pattern matched by this automaton. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "quux", "baz", - /// ]); - /// assert_eq!(4, ac.max_pattern_len()); - /// ``` - pub fn max_pattern_len(&self) -> usize { - self.imp.max_pattern_len() - } - - /// Return the total number of patterns matched by this automaton. - /// - /// This includes patterns that may never participate in a match. For - /// example, if - /// [`MatchKind::LeftmostFirst`](enum.MatchKind.html#variant.LeftmostFirst) - /// match semantics are used, and the patterns `Sam` and `Samwise` were - /// used to build the automaton, then `Samwise` can never participate in a - /// match because `Sam` will always take priority. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "baz", - /// ]); - /// assert_eq!(3, ac.pattern_count()); - /// ``` - pub fn pattern_count(&self) -> usize { - self.imp.pattern_count() - } - - /// Returns true if and only if this automaton supports reporting - /// overlapping matches. - /// - /// If this returns false and overlapping matches are requested, then it - /// will result in a panic. - /// - /// Since leftmost matching is inherently incompatible with overlapping - /// matches, only - /// [`MatchKind::Standard`](enum.MatchKind.html#variant.Standard) - /// supports overlapping matches. This is unlikely to change in the future. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::Standard) - /// .build(&["foo", "bar", "baz"]); - /// assert!(ac.supports_overlapping()); - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(&["foo", "bar", "baz"]); - /// assert!(!ac.supports_overlapping()); - /// ``` - pub fn supports_overlapping(&self) -> bool { - self.match_kind.supports_overlapping() - } - - /// Returns true if and only if this automaton supports stream searching. - /// - /// If this returns false and stream searching (or replacing) is attempted, - /// then it will result in a panic. - /// - /// Currently, only - /// [`MatchKind::Standard`](enum.MatchKind.html#variant.Standard) - /// supports streaming. This may be expanded in the future. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::Standard) - /// .build(&["foo", "bar", "baz"]); - /// assert!(ac.supports_stream()); - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(&["foo", "bar", "baz"]); - /// assert!(!ac.supports_stream()); - /// ``` - pub fn supports_stream(&self) -> bool { - self.match_kind.supports_stream() - } - - /// Returns the approximate total amount of heap used by this automaton, in - /// units of bytes. 
- /// - /// # Examples - /// - /// This example shows the difference in heap usage between a few - /// configurations: - /// - /// ```ignore - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let ac = AhoCorasickBuilder::new() - /// .dfa(false) // default - /// .build(&["foo", "bar", "baz"]); - /// assert_eq!(10_336, ac.heap_bytes()); - /// - /// let ac = AhoCorasickBuilder::new() - /// .dfa(false) // default - /// .ascii_case_insensitive(true) - /// .build(&["foo", "bar", "baz"]); - /// assert_eq!(10_384, ac.heap_bytes()); - /// - /// let ac = AhoCorasickBuilder::new() - /// .dfa(true) - /// .ascii_case_insensitive(true) - /// .build(&["foo", "bar", "baz"]); - /// assert_eq!(1_248, ac.heap_bytes()); - /// ``` - pub fn heap_bytes(&self) -> usize { - match self.imp { - Imp::NFA(ref nfa) => nfa.heap_bytes(), - Imp::DFA(ref dfa) => dfa.heap_bytes(), - } - } -} - -/// The internal implementation of Aho-Corasick, which is either an NFA or -/// a DFA. The NFA is slower but uses less memory. The DFA is faster but uses -/// more memory. -#[derive(Clone, Debug)] -enum Imp { - NFA(NFA), - DFA(DFA), -} - -impl Imp { - /// Returns the type of match semantics implemented by this automaton. - fn match_kind(&self) -> &MatchKind { - match *self { - Imp::NFA(ref nfa) => nfa.match_kind(), - Imp::DFA(ref dfa) => dfa.match_kind(), - } - } - - /// Returns the identifier of the start state. - fn start_state(&self) -> S { - match *self { - Imp::NFA(ref nfa) => nfa.start_state(), - Imp::DFA(ref dfa) => dfa.start_state(), - } - } - - /// The length, in bytes, of the longest pattern in this automaton. This - /// information is useful for maintaining correct buffer sizes when - /// searching on streams. - fn max_pattern_len(&self) -> usize { - match *self { - Imp::NFA(ref nfa) => nfa.max_pattern_len(), - Imp::DFA(ref dfa) => dfa.max_pattern_len(), - } - } - - /// The total number of patterns added to this automaton. This includes - /// patterns that may never match. 
The maximum matching pattern that can be - /// reported is exactly one less than this number. - fn pattern_count(&self) -> usize { - match *self { - Imp::NFA(ref nfa) => nfa.pattern_count(), - Imp::DFA(ref dfa) => dfa.pattern_count(), - } - } - - /// Returns the prefilter object, if one exists, for the underlying - /// automaton. - fn prefilter(&self) -> Option<&dyn Prefilter> { - match *self { - Imp::NFA(ref nfa) => nfa.prefilter(), - Imp::DFA(ref dfa) => dfa.prefilter(), - } - } - - /// Returns true if and only if we should attempt to use a prefilter. - fn use_prefilter(&self) -> bool { - let p = match self.prefilter() { - None => return false, - Some(p) => p, - }; - !p.looks_for_non_start_of_match() - } - - #[inline(always)] - fn overlapping_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut S, - match_index: &mut usize, - ) -> Option { - match *self { - Imp::NFA(ref nfa) => nfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - Imp::DFA(ref dfa) => dfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - } - } - - #[inline(always)] - fn earliest_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut S, - ) -> Option { - match *self { - Imp::NFA(ref nfa) => { - nfa.earliest_find_at(prestate, haystack, at, state_id) - } - Imp::DFA(ref dfa) => { - dfa.earliest_find_at(prestate, haystack, at, state_id) - } - } - } - - #[inline(always)] - fn find_at_no_state( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Option { - match *self { - Imp::NFA(ref nfa) => nfa.find_at_no_state(prestate, haystack, at), - Imp::DFA(ref dfa) => dfa.find_at_no_state(prestate, haystack, at), - } - } -} - -/// An iterator of non-overlapping matches in a particular haystack. -/// -/// This iterator yields matches according to the -/// [`MatchKind`](enum.MatchKind.html) -/// used by this automaton. 
-/// -/// This iterator is constructed via the -/// [`AhoCorasick::find_iter`](struct.AhoCorasick.html#method.find_iter) -/// method. -/// -/// The type variable `S` refers to the representation used for state -/// identifiers. (By default, this is `usize`.) -/// -/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton. -/// -/// The lifetime `'b` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindIter<'a, 'b, S: StateID> { - fsm: &'a Imp, - prestate: PrefilterState, - haystack: &'b [u8], - pos: usize, -} - -impl<'a, 'b, S: StateID> FindIter<'a, 'b, S> { - fn new(ac: &'a AhoCorasick, haystack: &'b [u8]) -> FindIter<'a, 'b, S> { - let prestate = PrefilterState::new(ac.max_pattern_len()); - FindIter { fsm: &ac.imp, prestate, haystack, pos: 0 } - } -} - -impl<'a, 'b, S: StateID> Iterator for FindIter<'a, 'b, S> { - type Item = Match; - - fn next(&mut self) -> Option { - if self.pos > self.haystack.len() { - return None; - } - let result = self.fsm.find_at_no_state( - &mut self.prestate, - self.haystack, - self.pos, - ); - let mat = match result { - None => return None, - Some(mat) => mat, - }; - if mat.end() == self.pos { - // If the automaton can match the empty string and if we found an - // empty match, then we need to forcefully move the position. - self.pos += 1; - } else { - self.pos = mat.end(); - } - Some(mat) - } -} - -/// An iterator of overlapping matches in a particular haystack. -/// -/// This iterator will report all possible matches in a particular haystack, -/// even when the matches overlap. -/// -/// This iterator is constructed via the -/// [`AhoCorasick::find_overlapping_iter`](struct.AhoCorasick.html#method.find_overlapping_iter) -/// method. -/// -/// The type variable `S` refers to the representation used for state -/// identifiers. (By default, this is `usize`.) -/// -/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton. 
-/// -/// The lifetime `'b` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindOverlappingIter<'a, 'b, S: StateID> { - fsm: &'a Imp, - prestate: PrefilterState, - haystack: &'b [u8], - pos: usize, - state_id: S, - match_index: usize, -} - -impl<'a, 'b, S: StateID> FindOverlappingIter<'a, 'b, S> { - fn new( - ac: &'a AhoCorasick, - haystack: &'b [u8], - ) -> FindOverlappingIter<'a, 'b, S> { - assert!( - ac.supports_overlapping(), - "automaton does not support overlapping searches" - ); - let prestate = PrefilterState::new(ac.max_pattern_len()); - FindOverlappingIter { - fsm: &ac.imp, - prestate, - haystack, - pos: 0, - state_id: ac.imp.start_state(), - match_index: 0, - } - } -} - -impl<'a, 'b, S: StateID> Iterator for FindOverlappingIter<'a, 'b, S> { - type Item = Match; - - fn next(&mut self) -> Option { - let result = self.fsm.overlapping_find_at( - &mut self.prestate, - self.haystack, - self.pos, - &mut self.state_id, - &mut self.match_index, - ); - match result { - None => return None, - Some(m) => { - self.pos = m.end(); - Some(m) - } - } - } -} - -/// An iterator that reports Aho-Corasick matches in a stream. -/// -/// This iterator yields elements of type `io::Result`, where an error -/// is reported if there was a problem reading from the underlying stream. -/// The iterator terminates only when the underlying stream reaches `EOF`. -/// -/// This iterator is constructed via the -/// [`AhoCorasick::stream_find_iter`](struct.AhoCorasick.html#method.stream_find_iter) -/// method. -/// -/// The type variable `R` refers to the `io::Read` stream that is being read -/// from. -/// -/// The type variable `S` refers to the representation used for state -/// identifiers. (By default, this is `usize`.) -/// -/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton. 
-#[derive(Debug)] -pub struct StreamFindIter<'a, R, S: StateID> { - it: StreamChunkIter<'a, R, S>, -} - -impl<'a, R: io::Read, S: StateID> StreamFindIter<'a, R, S> { - fn new(ac: &'a AhoCorasick, rdr: R) -> StreamFindIter<'a, R, S> { - StreamFindIter { it: StreamChunkIter::new(ac, rdr) } - } -} - -impl<'a, R: io::Read, S: StateID> Iterator for StreamFindIter<'a, R, S> { - type Item = io::Result; - - fn next(&mut self) -> Option> { - loop { - match self.it.next() { - None => return None, - Some(Err(err)) => return Some(Err(err)), - Some(Ok(StreamChunk::NonMatch { .. })) => {} - Some(Ok(StreamChunk::Match { mat, .. })) => { - return Some(Ok(mat)); - } - } - } - } -} - -/// An iterator over chunks in an underlying reader. Each chunk either -/// corresponds to non-matching bytes or matching bytes, but all bytes from -/// the underlying reader are reported in sequence. There may be an arbitrary -/// number of non-matching chunks before seeing a matching chunk. -/// -/// N.B. This does not actually implement Iterator because we need to borrow -/// from the underlying reader. But conceptually, it's still an iterator. -#[derive(Debug)] -struct StreamChunkIter<'a, R, S: StateID> { - /// The AC automaton. - fsm: &'a Imp, - /// State associated with this automaton's prefilter. It is a heuristic - /// for stopping the prefilter if it's deemed ineffective. - prestate: PrefilterState, - /// The source of bytes we read from. - rdr: R, - /// A fixed size buffer. This is what we actually search. There are some - /// invariants around the buffer's size, namely, it must be big enough to - /// contain the longest possible match. - buf: Buffer, - /// The ID of the FSM state we're currently in. - state_id: S, - /// The current position at which to start the next search in `buf`. - search_pos: usize, - /// The absolute position of `search_pos`, where `0` corresponds to the - /// position of the first byte read from `rdr`. 
- absolute_pos: usize, - /// The ending position of the last StreamChunk that was returned to the - /// caller. This position is used to determine whether we need to emit - /// non-matching bytes before emitting a match. - report_pos: usize, - /// A match that should be reported on the next call. - pending_match: Option, - /// Enabled only when the automaton can match the empty string. When - /// enabled, we need to execute one final search after consuming the - /// reader to find the trailing empty match. - has_empty_match_at_end: bool, -} - -/// A single chunk yielded by the stream chunk iterator. -/// -/// The `'r` lifetime refers to the lifetime of the stream chunk iterator. -#[derive(Debug)] -enum StreamChunk<'r> { - /// A chunk that does not contain any matches. - NonMatch { bytes: &'r [u8] }, - /// A chunk that precisely contains a match. - Match { bytes: &'r [u8], mat: Match }, -} - -impl<'a, R: io::Read, S: StateID> StreamChunkIter<'a, R, S> { - fn new(ac: &'a AhoCorasick, rdr: R) -> StreamChunkIter<'a, R, S> { - assert!( - ac.supports_stream(), - "stream searching is only supported for Standard match semantics" - ); - - let prestate = if ac.imp.use_prefilter() { - PrefilterState::new(ac.max_pattern_len()) - } else { - PrefilterState::disabled() - }; - let buf = Buffer::new(ac.imp.max_pattern_len()); - let state_id = ac.imp.start_state(); - StreamChunkIter { - fsm: &ac.imp, - prestate, - rdr, - buf, - state_id, - absolute_pos: 0, - report_pos: 0, - search_pos: 0, - pending_match: None, - has_empty_match_at_end: ac.is_match(""), - } - } - - fn next(&mut self) -> Option> { - loop { - if let Some(mut mat) = self.pending_match.take() { - let bytes = &self.buf.buffer()[mat.start()..mat.end()]; - self.report_pos = mat.end(); - mat = mat.increment(self.absolute_pos); - return Some(Ok(StreamChunk::Match { bytes, mat })); - } - if self.search_pos >= self.buf.len() { - if let Some(end) = self.unreported() { - let bytes = &self.buf.buffer()[self.report_pos..end]; - 
self.report_pos = end; - return Some(Ok(StreamChunk::NonMatch { bytes })); - } - if self.buf.len() >= self.buf.min_buffer_len() { - // This is the point at which we roll our buffer, which we - // only do if our buffer has at least the minimum amount of - // bytes in it. Before rolling, we update our various - // positions to be consistent with the buffer after it has - // been rolled. - - self.report_pos -= - self.buf.len() - self.buf.min_buffer_len(); - self.absolute_pos += - self.search_pos - self.buf.min_buffer_len(); - self.search_pos = self.buf.min_buffer_len(); - self.buf.roll(); - } - match self.buf.fill(&mut self.rdr) { - Err(err) => return Some(Err(err)), - Ok(false) => { - // We've hit EOF, but if there are still some - // unreported bytes remaining, return them now. - if self.report_pos < self.buf.len() { - let bytes = &self.buf.buffer()[self.report_pos..]; - self.report_pos = self.buf.len(); - - let chunk = StreamChunk::NonMatch { bytes }; - return Some(Ok(chunk)); - } else { - // We've reported everything, but there might still - // be a match at the very last position. - if !self.has_empty_match_at_end { - return None; - } - // fallthrough for another search to get trailing - // empty matches - self.has_empty_match_at_end = false; - } - } - Ok(true) => {} - } - } - let result = self.fsm.earliest_find_at( - &mut self.prestate, - self.buf.buffer(), - self.search_pos, - &mut self.state_id, - ); - match result { - None => { - self.search_pos = self.buf.len(); - } - Some(mat) => { - self.state_id = self.fsm.start_state(); - if mat.end() == self.search_pos { - // If the automaton can match the empty string and if - // we found an empty match, then we need to forcefully - // move the position. 
- self.search_pos += 1; - } else { - self.search_pos = mat.end(); - } - self.pending_match = Some(mat.clone()); - if self.report_pos < mat.start() { - let bytes = - &self.buf.buffer()[self.report_pos..mat.start()]; - self.report_pos = mat.start(); - - let chunk = StreamChunk::NonMatch { bytes }; - return Some(Ok(chunk)); - } - } - } - } - } - - fn unreported(&self) -> Option { - let end = self.search_pos.saturating_sub(self.buf.min_buffer_len()); - if self.report_pos < end { - Some(end) - } else { - None - } - } -} - -/// A builder for configuring an Aho-Corasick automaton. -#[derive(Clone, Debug)] -pub struct AhoCorasickBuilder { - nfa_builder: nfa::Builder, - dfa_builder: dfa::Builder, - dfa: bool, -} - -impl Default for AhoCorasickBuilder { - fn default() -> AhoCorasickBuilder { - AhoCorasickBuilder::new() - } -} - -impl AhoCorasickBuilder { - /// Create a new builder for configuring an Aho-Corasick automaton. - /// - /// If you don't need fine grained configuration or aren't sure which knobs - /// to set, try using - /// [`AhoCorasick::new_auto_configured`](struct.AhoCorasick.html#method.new_auto_configured) - /// instead. - pub fn new() -> AhoCorasickBuilder { - AhoCorasickBuilder { - nfa_builder: nfa::Builder::new(), - dfa_builder: dfa::Builder::new(), - dfa: false, - } - } - - /// Build an Aho-Corasick automaton using the configuration set on this - /// builder. - /// - /// A builder may be reused to create more automatons. - /// - /// This method will use the default for representing internal state - /// identifiers, which is `usize`. This guarantees that building the - /// automaton will succeed and is generally a good default, but can make - /// the size of the automaton 2-8 times bigger than it needs to be, - /// depending on your target platform. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// let patterns = &["foo", "bar", "baz"]; - /// let ac = AhoCorasickBuilder::new() - /// .build(patterns); - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// ``` - pub fn build(&self, patterns: I) -> AhoCorasick - where - I: IntoIterator, - P: AsRef<[u8]>, - { - // The builder only returns an error if the chosen state ID - // representation is too small to fit all of the given patterns. In - // this case, since we fix the representation to usize, it will always - // work because it's impossible to overflow usize since the underlying - // storage would OOM long before that happens. - self.build_with_size::(patterns) - .expect("usize state ID type should always work") - } - - /// Build an Aho-Corasick automaton using the configuration set on this - /// builder with a specific state identifier representation. This only has - /// an effect when the `dfa` option is enabled. - /// - /// Generally, the choices for a state identifier representation are - /// `u8`, `u16`, `u32`, `u64` or `usize`, with `usize` being the default. - /// The advantage of choosing a smaller state identifier representation - /// is that the automaton produced will be smaller. This might be - /// beneficial for just generally using less space, or might even allow it - /// to fit more of the automaton in your CPU's cache, leading to overall - /// better search performance. - /// - /// Unlike the standard `build` method, this can report an error if the - /// state identifier representation cannot support the size of the - /// automaton. - /// - /// Note that the state identifier representation is determined by the - /// `S` type variable. This requires a type hint of some sort, either - /// by specifying the return type or using the turbofish, e.g., - /// `build_with_size::(...)`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, AhoCorasickBuilder}; - /// - /// # fn example() -> Result<(), ::aho_corasick::Error> { - /// let patterns = &["foo", "bar", "baz"]; - /// let ac: AhoCorasick = AhoCorasickBuilder::new() - /// .build_with_size(patterns)?; - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// # Ok(()) }; example().unwrap() - /// ``` - /// - /// Or alternatively, with turbofish: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// # fn example() -> Result<(), ::aho_corasick::Error> { - /// let patterns = &["foo", "bar", "baz"]; - /// let ac = AhoCorasickBuilder::new() - /// .build_with_size::(patterns)?; - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// # Ok(()) }; example().unwrap() - /// ``` - pub fn build_with_size( - &self, - patterns: I, - ) -> Result> - where - S: StateID, - I: IntoIterator, - P: AsRef<[u8]>, - { - let nfa = self.nfa_builder.build(patterns)?; - let match_kind = nfa.match_kind().clone(); - let imp = if self.dfa { - let dfa = self.dfa_builder.build(&nfa)?; - Imp::DFA(dfa) - } else { - Imp::NFA(nfa) - }; - Ok(AhoCorasick { imp, match_kind }) - } - - /// Automatically configure the settings on this builder according to the - /// patterns that will be used to construct the automaton. - /// - /// The idea here is to balance space and time automatically. That is, when - /// searching a small number of patterns, this will attempt to use the - /// fastest possible configuration since the total space required will be - /// small anyway. As the number of patterns grows, this will fall back to - /// slower configurations that use less space. - /// - /// This is guaranteed to never set `match_kind`, but any other option may - /// be overridden. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// let patterns = &["foo", "bar", "baz"]; - /// let ac = AhoCorasickBuilder::new() - /// .auto_configure(patterns) - /// .build(patterns); - /// assert_eq!(Some(1), ac.find("xxx bar xxx").map(|m| m.pattern())); - /// ``` - pub fn auto_configure>( - &mut self, - patterns: &[B], - ) -> &mut AhoCorasickBuilder { - // N.B. Currently we only use the length of `patterns` to make a - // decision here, and could therefore ask for an `ExactSizeIterator` - // instead. But it's conceivable that we might adapt this to look at - // the total number of bytes, which would requires a second pass. - // - // The logic here is fairly rudimentary at the moment, but probably - // OK. The idea here is to use the fastest thing possible for a small - // number of patterns. That is, a DFA with no byte classes, since byte - // classes require an extra indirection for every byte searched. With a - // moderate number of patterns, we still want a DFA, but save on both - // space and compilation time by enabling byte classes. Finally, fall - // back to the slower but smaller NFA. - if patterns.len() <= 100 { - // N.B. Using byte classes can actually be faster by improving - // locality, but this only really applies for multi-megabyte - // automata (i.e., automata that don't fit in your CPU's cache). - self.dfa(true); - } else if patterns.len() <= 5000 { - self.dfa(true); - } - self - } - - /// Set the desired match semantics. - /// - /// The default is `MatchKind::Standard`, which corresponds to the match - /// semantics supported by the standard textbook description of the - /// Aho-Corasick algorithm. Namely, matches are reported as soon as they - /// are found. Moreover, this is the only way to get overlapping matches - /// or do stream searching. 
- /// - /// The other kinds of match semantics that are supported are - /// `MatchKind::LeftmostFirst` and `MatchKind::LeftmostLongest`. The former - /// corresponds to the match you would get if you were to try to match - /// each pattern at each position in the haystack in the same order that - /// you give to the automaton. That is, it returns the leftmost match - /// corresponding the earliest pattern given to the automaton. The latter - /// corresponds to finding the longest possible match among all leftmost - /// matches. - /// - /// For more details on match semantics, see the - /// [documentation for `MatchKind`](enum.MatchKind.html). - /// - /// # Examples - /// - /// In these examples, we demonstrate the differences between match - /// semantics for a particular set of patterns in a specific order: - /// `b`, `abc`, `abcd`. - /// - /// Standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("b", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasickBuilder::new() - /// 
.match_kind(MatchKind::LeftmostLongest) - /// .build(patterns); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abcd", &haystack[mat.start()..mat.end()]); - /// ``` - pub fn match_kind(&mut self, kind: MatchKind) -> &mut AhoCorasickBuilder { - self.nfa_builder.match_kind(kind); - self - } - - /// Enable anchored mode, which requires all matches to start at the - /// first position in a haystack. - /// - /// This option is disabled by default. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// let patterns = &["foo", "bar"]; - /// let haystack = "foobar"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .anchored(true) - /// .build(patterns); - /// assert_eq!(1, ac.find_iter(haystack).count()); - /// ``` - /// - /// When searching for overlapping matches, all matches that start at - /// the beginning of a haystack will be reported: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// let patterns = &["foo", "foofoo"]; - /// let haystack = "foofoo"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .anchored(true) - /// .build(patterns); - /// assert_eq!(2, ac.find_overlapping_iter(haystack).count()); - /// // A non-anchored search would return 3 matches. - /// ``` - pub fn anchored(&mut self, yes: bool) -> &mut AhoCorasickBuilder { - self.nfa_builder.anchored(yes); - self - } - - /// Enable ASCII-aware case insensitive matching. - /// - /// When this option is enabled, searching will be performed without - /// respect to case for ASCII letters (`a-z` and `A-Z`) only. - /// - /// Enabling this option does not change the search algorithm, but it may - /// increase the size of the automaton. - /// - /// **NOTE:** It is unlikely that support for Unicode case folding will - /// be added in the future. The ASCII case works via a simple hack to the - /// underlying automaton, but full Unicode handling requires a fair bit of - /// sophistication. 
If you do need Unicode handling, you might consider - /// using the [`regex` crate](https://docs.rs/regex) or the lower level - /// [`regex-automata` crate](https://docs.rs/regex-automata). - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasickBuilder; - /// - /// let patterns = &["FOO", "bAr", "BaZ"]; - /// let haystack = "foo bar baz"; - /// - /// let ac = AhoCorasickBuilder::new() - /// .ascii_case_insensitive(true) - /// .build(patterns); - /// assert_eq!(3, ac.find_iter(haystack).count()); - /// ``` - pub fn ascii_case_insensitive( - &mut self, - yes: bool, - ) -> &mut AhoCorasickBuilder { - self.nfa_builder.ascii_case_insensitive(yes); - self - } - - /// Set the limit on how many NFA states use a dense representation for - /// their transitions. - /// - /// A dense representation uses more space, but supports faster access to - /// transitions at search time. Thus, this setting permits the control of a - /// space vs time trade off when using the NFA variant of Aho-Corasick. - /// - /// This limit is expressed in terms of the depth of a state, i.e., the - /// number of transitions from the starting state of the NFA. The idea is - /// that most of the time searching will be spent near the starting state - /// of the automaton, so states near the start state should use a dense - /// representation. States further away from the start state would then use - /// a sparse representation, which uses less space but is slower to access - /// transitions at search time. - /// - /// By default, this is set to a low but non-zero number. - /// - /// This setting has no effect if the `dfa` option is enabled. - pub fn dense_depth(&mut self, depth: usize) -> &mut AhoCorasickBuilder { - self.nfa_builder.dense_depth(depth); - self - } - - /// Compile the standard Aho-Corasick automaton into a deterministic finite - /// automaton (DFA). 
- /// - /// When this is disabled (which is the default), then a non-deterministic - /// finite automaton (NFA) is used instead. - /// - /// The main benefit to a DFA is that it can execute searches more quickly - /// than a NFA (perhaps 2-4 times as fast). The main drawback is that the - /// DFA uses more space and can take much longer to build. - /// - /// Enabling this option does not change the time complexity for - /// constructing the Aho-Corasick automaton (which is `O(p)` where - /// `p` is the total number of patterns being compiled). Enabling this - /// option does however reduce the time complexity of non-overlapping - /// searches from `O(n + p)` to `O(n)`, where `n` is the length of the - /// haystack. - /// - /// In general, it's a good idea to enable this if you're searching a - /// small number of fairly short patterns (~1000), or if you want the - /// fastest possible search without regard to compilation time or space - /// usage. - pub fn dfa(&mut self, yes: bool) -> &mut AhoCorasickBuilder { - self.dfa = yes; - self - } - - /// Enable heuristic prefilter optimizations. - /// - /// When enabled, searching will attempt to quickly skip to match - /// candidates using specialized literal search routines. A prefilter - /// cannot always be used, and is generally treated as a heuristic. It - /// can be useful to disable this if the prefilter is observed to be - /// sub-optimal for a particular workload. - /// - /// This is enabled by default. - pub fn prefilter(&mut self, yes: bool) -> &mut AhoCorasickBuilder { - self.nfa_builder.prefilter(yes); - self - } - - /// Shrink the size of the transition alphabet by mapping bytes to their - /// equivalence classes. This only has an effect when the `dfa` option is - /// enabled. - /// - /// When enabled, each a DFA will use a map from all possible bytes - /// to their corresponding equivalence class. 
Each equivalence class - /// represents a set of bytes that does not discriminate between a match - /// and a non-match in the DFA. For example, the patterns `bar` and `baz` - /// have at least five equivalence classes: singleton sets of `b`, `a`, `r` - /// and `z`, and a final set that contains every other byte. - /// - /// The advantage of this map is that the size of the transition table can - /// be reduced drastically from `#states * 256 * sizeof(id)` to - /// `#states * k * sizeof(id)` where `k` is the number of equivalence - /// classes. As a result, total space usage can decrease substantially. - /// Moreover, since a smaller alphabet is used, compilation becomes faster - /// as well. - /// - /// The disadvantage of this map is that every byte searched must be - /// passed through this map before it can be used to determine the next - /// transition. This has a small match time performance cost. However, if - /// the DFA is otherwise very large without byte classes, then using byte - /// classes can greatly improve memory locality and thus lead to better - /// overall performance. - /// - /// This option is enabled by default. - #[deprecated( - since = "0.7.16", - note = "not carrying its weight, will be always enabled, see: https://github.com/BurntSushi/aho-corasick/issues/57" - )] - pub fn byte_classes(&mut self, yes: bool) -> &mut AhoCorasickBuilder { - self.dfa_builder.byte_classes(yes); - self - } - - /// Premultiply state identifiers in the transition table. This only has - /// an effect when the `dfa` option is enabled. - /// - /// When enabled, state identifiers are premultiplied to point to their - /// corresponding row in the transition table. That is, given the `i`th - /// state, its corresponding premultiplied identifier is `i * k` where `k` - /// is the alphabet size of the automaton. (The alphabet size is at most - /// 256, but is in practice smaller if byte classes is enabled.) 
- /// - /// When state identifiers are not premultiplied, then the identifier of - /// the `i`th state is `i`. - /// - /// The advantage of premultiplying state identifiers is that is saves a - /// multiplication instruction per byte when searching with a DFA. This has - /// been observed to lead to a 20% performance benefit in micro-benchmarks. - /// - /// The primary disadvantage of premultiplying state identifiers is - /// that they require a larger integer size to represent. For example, - /// if the DFA has 200 states, then its premultiplied form requires 16 - /// bits to represent every possible state identifier, where as its - /// non-premultiplied form only requires 8 bits. - /// - /// This option is enabled by default. - #[deprecated( - since = "0.7.16", - note = "not carrying its weight, will be always enabled, see: https://github.com/BurntSushi/aho-corasick/issues/57" - )] - pub fn premultiply(&mut self, yes: bool) -> &mut AhoCorasickBuilder { - self.dfa_builder.premultiply(yes); - self - } -} - -/// A knob for controlling the match semantics of an Aho-Corasick automaton. -/// -/// There are two generally different ways that Aho-Corasick automatons can -/// report matches. The first way is the "standard" approach that results from -/// implementing most textbook explanations of Aho-Corasick. The second way is -/// to report only the leftmost non-overlapping matches. The leftmost approach -/// is in turn split into two different ways of resolving ambiguous matches: -/// leftmost-first and leftmost-longest. -/// -/// The `Standard` match kind is the default and is the only one that supports -/// overlapping matches and stream searching. (Trying to find overlapping -/// or streaming matches using leftmost match semantics will result in a -/// panic.) The `Standard` match kind will report matches as they are seen. -/// When searching for overlapping matches, then all possible matches are -/// reported. 
When searching for non-overlapping matches, the first match seen -/// is reported. For example, for non-overlapping matches, given the patterns -/// `abcd` and `b` and the subject string `abcdef`, only a match for `b` is -/// reported since it is detected first. The `abcd` match is never reported -/// since it overlaps with the `b` match. -/// -/// In contrast, the leftmost match kind always prefers the leftmost match -/// among all possible matches. Given the same example as above with `abcd` and -/// `b` as patterns and `abcdef` as the subject string, the leftmost match is -/// `abcd` since it begins before the `b` match, even though the `b` match is -/// detected before the `abcd` match. In this case, the `b` match is not -/// reported at all since it overlaps with the `abcd` match. -/// -/// The difference between leftmost-first and leftmost-longest is in how they -/// resolve ambiguous matches when there are multiple leftmost matches to -/// choose from. Leftmost-first always chooses the pattern that was provided -/// earliest, where as leftmost-longest always chooses the longest matching -/// pattern. For example, given the patterns `a` and `ab` and the subject -/// string `ab`, the leftmost-first match is `a` but the leftmost-longest match -/// is `ab`. Conversely, if the patterns were given in reverse order, i.e., -/// `ab` and `a`, then both the leftmost-first and leftmost-longest matches -/// would be `ab`. Stated differently, the leftmost-first match depends on the -/// order in which the patterns were given to the Aho-Corasick automaton. -/// Because of that, when leftmost-first matching is used, if a pattern `A` -/// that appears before a pattern `B` is a prefix of `B`, then it is impossible -/// to ever observe a match of `B`. -/// -/// If you're not sure which match kind to pick, then stick with the standard -/// kind, which is the default. In particular, if you need overlapping or -/// streaming matches, then you _must_ use the standard kind. 
The leftmost -/// kinds are useful in specific circumstances. For example, leftmost-first can -/// be very useful as a way to implement match priority based on the order of -/// patterns given and leftmost-longest can be useful for dictionary searching -/// such that only the longest matching words are reported. -/// -/// # Relationship with regular expression alternations -/// -/// Understanding match semantics can be a little tricky, and one easy way -/// to conceptualize non-overlapping matches from an Aho-Corasick automaton -/// is to think about them as a simple alternation of literals in a regular -/// expression. For example, let's say we wanted to match the strings -/// `Sam` and `Samwise`, which would turn into the regex `Sam|Samwise`. It -/// turns out that regular expression engines have two different ways of -/// matching this alternation. The first way, leftmost-longest, is commonly -/// found in POSIX compatible implementations of regular expressions (such as -/// `grep`). The second way, leftmost-first, is commonly found in backtracking -/// implementations such as Perl. (Some regex engines, such as RE2 and Rust's -/// regex engine do not use backtracking, but still implement leftmost-first -/// semantics in an effort to match the behavior of dominant backtracking -/// regex engines such as those found in Perl, Ruby, Python, Javascript and -/// PHP.) -/// -/// That is, when matching `Sam|Samwise` against `Samwise`, a POSIX regex -/// will match `Samwise` because it is the longest possible match, but a -/// Perl-like regex will match `Sam` since it appears earlier in the -/// alternation. Indeed, the regex `Sam|Samwise` in a Perl-like regex engine -/// will never match `Samwise` since `Sam` will always have higher priority. -/// Conversely, matching the regex `Samwise|Sam` against `Samwise` will lead to -/// a match of `Samwise` in both POSIX and Perl-like regexes since `Samwise` is -/// still longest match, but it also appears earlier than `Sam`. 
-/// -/// The "standard" match semantics of Aho-Corasick generally don't correspond -/// to the match semantics of any large group of regex implementations, so -/// there's no direct analogy that can be made here. Standard match semantics -/// are generally useful for overlapping matches, or if you just want to see -/// matches as they are detected. -/// -/// The main conclusion to draw from this section is that the match semantics -/// can be tweaked to precisely match either Perl-like regex alternations or -/// POSIX regex alternations. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum MatchKind { - /// Use standard match semantics, which support overlapping matches. When - /// used with non-overlapping matches, matches are reported as they are - /// seen. - Standard, - /// Use leftmost-first match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the match - /// corresponding to the pattern that appeared earlier when constructing - /// the automaton is reported. - /// - /// This does **not** support overlapping matches or stream searching. If - /// this match kind is used, attempting to find overlapping matches or - /// stream matches will panic. - LeftmostFirst, - /// Use leftmost-longest match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the longest match - /// is chosen. - /// - /// This does **not** support overlapping matches or stream searching. If - /// this match kind is used, attempting to find overlapping matches or - /// stream matches will panic. - LeftmostLongest, - /// Hints that destructuring should not be exhaustive. - /// - /// This enum may grow additional variants, so this makes sure clients - /// don't count on exhaustive matching. (Otherwise, adding a new variant - /// could break existing code.) - #[doc(hidden)] - __Nonexhaustive, -} - -/// The default match kind is `MatchKind::Standard`. 
-impl Default for MatchKind { - fn default() -> MatchKind { - MatchKind::Standard - } -} - -impl MatchKind { - fn supports_overlapping(&self) -> bool { - self.is_standard() - } - - fn supports_stream(&self) -> bool { - // TODO: It may be possible to support this. It's hard. - // - // See: https://github.com/rust-lang/regex/issues/425#issuecomment-471367838 - self.is_standard() - } - - pub(crate) fn is_standard(&self) -> bool { - *self == MatchKind::Standard - } - - pub(crate) fn is_leftmost(&self) -> bool { - *self == MatchKind::LeftmostFirst - || *self == MatchKind::LeftmostLongest - } - - pub(crate) fn is_leftmost_first(&self) -> bool { - *self == MatchKind::LeftmostFirst - } - - /// Convert this match kind into a packed match kind. If this match kind - /// corresponds to standard semantics, then this returns None, since - /// packed searching does not support standard semantics. - pub(crate) fn as_packed(&self) -> Option { - match *self { - MatchKind::Standard => None, - MatchKind::LeftmostFirst => Some(packed::MatchKind::LeftmostFirst), - MatchKind::LeftmostLongest => { - Some(packed::MatchKind::LeftmostLongest) - } - MatchKind::__Nonexhaustive => unreachable!(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn oibits() { - use std::panic::{RefUnwindSafe, UnwindSafe}; - - fn assert_send() {} - fn assert_sync() {} - fn assert_unwind_safe() {} - - assert_send::(); - assert_sync::(); - assert_unwind_safe::(); - assert_send::(); - assert_sync::(); - assert_unwind_safe::(); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/automaton.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/automaton.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/automaton.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/automaton.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,573 +0,0 @@ -use crate::ahocorasick::MatchKind; -use 
crate::prefilter::{self, Candidate, Prefilter, PrefilterState}; -use crate::state_id::{dead_id, fail_id, StateID}; -use crate::Match; - -// NOTE: This trait essentially started as a copy of the same trait from from -// regex-automata, with some wording changed since we use this trait for -// NFAs in addition to DFAs in this crate. Additionally, we do not export -// this trait. It's only used internally to reduce code duplication. The -// regex-automata crate needs to expose it because its Regex type is generic -// over implementations of this trait. In this crate, we encapsulate everything -// behind the AhoCorasick type. -// -// This trait is a bit of a mess, but it's not quite clear how to fix it. -// Basically, there are several competing concerns: -// -// * We need performance, so everything effectively needs to get monomorphized. -// * There are several variations on searching Aho-Corasick automatons: -// overlapping, standard and leftmost. Overlapping and standard are somewhat -// combined together below, but there is no real way to combine standard with -// leftmost. Namely, leftmost requires continuing a search even after a match -// is found, in order to correctly disambiguate a match. -// * On top of that, *sometimes* callers want to know which state the automaton -// is in after searching. This is principally useful for overlapping and -// stream searches. However, when callers don't care about this, we really -// do not want to be forced to compute it, since it sometimes requires extra -// work. Thus, there are effectively two copies of leftmost searching: one -// for tracking the state ID and one that doesn't. We should ideally do the -// same for standard searching, but my sanity stopped me. - -// SAFETY RATIONALE: Previously, the code below went to some length to remove -// all bounds checks. This generally produced tighter assembly and lead to -// 20-50% improvements in micro-benchmarks on corpora made up of random -// characters. 
This somewhat makes sense, since the branch predictor is going -// to be at its worse on random text. -// -// However, using the aho-corasick-debug tool and manually benchmarking -// different inputs, the code *with* bounds checks actually wound up being -// slightly faster: -// -// $ cat input -// Sherlock Holmes -// John Watson -// Professor Moriarty -// Irene Adler -// Mary Watson -// -// $ aho-corasick-debug-safe \ -// input OpenSubtitles2018.raw.sample.en --kind leftmost-first --dfa -// pattern read time: 32.824µs -// automaton build time: 444.687µs -// automaton heap usage: 72392 bytes -// match count: 639 -// count time: 1.809961702s -// -// $ aho-corasick-debug-master \ -// input OpenSubtitles2018.raw.sample.en --kind leftmost-first --dfa -// pattern read time: 31.425µs -// automaton build time: 317.434µs -// automaton heap usage: 72392 bytes -// match count: 639 -// count time: 2.059157705s -// -// I was able to reproduce this result on two different machines (an i5 and -// an i7). Therefore, we go the route of safe code for now. - -/// A trait describing the interface of an Aho-Corasick finite state machine. -/// -/// Every automaton has exactly one fail state, one dead state and exactly one -/// start state. Generally, these correspond to the first, second and third -/// states, respectively. The dead state is always treated as a sentinel. That -/// is, no correct Aho-Corasick automaton will ever transition into the fail -/// state. The dead state, however, can be transitioned into, but only when -/// leftmost-first or leftmost-longest match semantics are enabled and only -/// when at least one match has been observed. -/// -/// Every automaton also has one or more match states, such that -/// `Automaton::is_match_state(id)` returns `true` if and only if `id` -/// corresponds to a match state. -pub trait Automaton { - /// The representation used for state identifiers in this automaton. 
- /// - /// Typically, this is one of `u8`, `u16`, `u32`, `u64` or `usize`. - type ID: StateID; - - /// The type of matching that should be done. - fn match_kind(&self) -> &MatchKind; - - /// Returns true if and only if this automaton uses anchored searches. - fn anchored(&self) -> bool; - - /// An optional prefilter for quickly skipping to the next candidate match. - /// A prefilter must report at least every match, although it may report - /// positions that do not correspond to a match. That is, it must not allow - /// false negatives, but can allow false positives. - /// - /// Currently, a prefilter only runs when the automaton is in the start - /// state. That is, the position reported by a prefilter should always - /// correspond to the start of a potential match. - fn prefilter(&self) -> Option<&dyn Prefilter>; - - /// Return the identifier of this automaton's start state. - fn start_state(&self) -> Self::ID; - - /// Returns true if and only if the given state identifier refers to a - /// valid state. - fn is_valid(&self, id: Self::ID) -> bool; - - /// Returns true if and only if the given identifier corresponds to a match - /// state. - /// - /// The state ID given must be valid, or else implementors may panic. - fn is_match_state(&self, id: Self::ID) -> bool; - - /// Returns true if and only if the given identifier corresponds to a state - /// that is either the dead state or a match state. - /// - /// Depending on the implementation of the automaton, this routine can - /// be used to save a branch in the core matching loop. Nevertheless, - /// `is_match_state(id) || id == dead_id()` is always a valid - /// implementation. Indeed, this is the default implementation. - /// - /// The state ID given must be valid, or else implementors may panic. - fn is_match_or_dead_state(&self, id: Self::ID) -> bool { - id == dead_id() || self.is_match_state(id) - } - - /// If the given state is a match state, return the match corresponding - /// to the given match index. 
`end` must be the ending position of the - /// detected match. If no match exists or if `match_index` exceeds the - /// number of matches in this state, then `None` is returned. - /// - /// The state ID given must be valid, or else implementors may panic. - /// - /// If the given state ID is correct and if the `match_index` is less than - /// the number of matches for that state, then this is guaranteed to return - /// a match. - fn get_match( - &self, - id: Self::ID, - match_index: usize, - end: usize, - ) -> Option; - - /// Returns the number of matches for the given state. If the given state - /// is not a match state, then this returns 0. - /// - /// The state ID given must be valid, or else implementors must panic. - fn match_count(&self, id: Self::ID) -> usize; - - /// Given the current state that this automaton is in and the next input - /// byte, this method returns the identifier of the next state. The - /// identifier returned must always be valid and may never correspond to - /// the fail state. The returned identifier may, however, point to the - /// dead state. - /// - /// This is not safe so that implementors may look up the next state - /// without memory safety checks such as bounds checks. As such, callers - /// must ensure that the given identifier corresponds to a valid automaton - /// state. Implementors must, in turn, ensure that this routine is safe for - /// all valid state identifiers and for all possible `u8` values. - fn next_state(&self, current: Self::ID, input: u8) -> Self::ID; - - /// Like next_state, but debug_asserts that the underlying - /// implementation never returns a `fail_id()` for the next state. - fn next_state_no_fail(&self, current: Self::ID, input: u8) -> Self::ID { - let next = self.next_state(current, input); - // We should never see a transition to the failure state. 
- debug_assert!( - next != fail_id(), - "automaton should never return fail_id for next state" - ); - next - } - - /// Execute a search using standard match semantics. - /// - /// This can be used even when the automaton was constructed with leftmost - /// match semantics when you want to find the earliest possible match. This - /// can also be used as part of an overlapping search implementation. - /// - /// N.B. This does not report a match if `state_id` is given as a matching - /// state. As such, this should not be used directly. - #[inline(always)] - fn standard_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut Self::ID, - ) -> Option { - if let Some(pre) = self.prefilter() { - self.standard_find_at_imp( - prestate, - Some(pre), - haystack, - at, - state_id, - ) - } else { - self.standard_find_at_imp(prestate, None, haystack, at, state_id) - } - } - - // It's important for this to always be inlined. Namely, its only caller - // is standard_find_at, and the inlining should remove the case analysis - // for prefilter scanning when there is no prefilter available. - #[inline(always)] - fn standard_find_at_imp( - &self, - prestate: &mut PrefilterState, - prefilter: Option<&dyn Prefilter>, - haystack: &[u8], - mut at: usize, - state_id: &mut Self::ID, - ) -> Option { - while at < haystack.len() { - if let Some(pre) = prefilter { - if prestate.is_effective(at) && *state_id == self.start_state() - { - let c = prefilter::next(prestate, pre, haystack, at) - .into_option(); - match c { - None => return None, - Some(i) => { - at = i; - } - } - } - } - // CORRECTNESS: next_state is correct for all possible u8 values, - // so the only thing we're concerned about is the validity of - // `state_id`. `state_id` either comes from the caller (in which - // case, we assume it is correct), or it comes from the return - // value of next_state, which is guaranteed to be correct. 
- *state_id = self.next_state_no_fail(*state_id, haystack[at]); - at += 1; - // This routine always quits immediately after seeing a - // match, and since dead states can only come after seeing - // a match, seeing a dead state here is impossible. (Unless - // we have an anchored automaton, in which case, dead states - // are used to stop a search.) - debug_assert!( - *state_id != dead_id() || self.anchored(), - "standard find should never see a dead state" - ); - - if self.is_match_or_dead_state(*state_id) { - return if *state_id == dead_id() { - None - } else { - self.get_match(*state_id, 0, at) - }; - } - } - None - } - - /// Execute a search using leftmost (either first or longest) match - /// semantics. - /// - /// The principle difference between searching with standard semantics and - /// searching with leftmost semantics is that leftmost searching will - /// continue searching even after a match has been found. Once a match - /// is found, the search does not stop until either the haystack has been - /// exhausted or a dead state is observed in the automaton. (Dead states - /// only exist in automatons constructed with leftmost semantics.) That is, - /// we rely on the construction of the automaton to tell us when to quit. - #[inline(never)] - fn leftmost_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut Self::ID, - ) -> Option { - if let Some(pre) = self.prefilter() { - self.leftmost_find_at_imp( - prestate, - Some(pre), - haystack, - at, - state_id, - ) - } else { - self.leftmost_find_at_imp(prestate, None, haystack, at, state_id) - } - } - - // It's important for this to always be inlined. Namely, its only caller - // is leftmost_find_at, and the inlining should remove the case analysis - // for prefilter scanning when there is no prefilter available. 
- #[inline(always)] - fn leftmost_find_at_imp( - &self, - prestate: &mut PrefilterState, - prefilter: Option<&dyn Prefilter>, - haystack: &[u8], - mut at: usize, - state_id: &mut Self::ID, - ) -> Option { - debug_assert!(self.match_kind().is_leftmost()); - if self.anchored() && at > 0 && *state_id == self.start_state() { - return None; - } - let mut last_match = self.get_match(*state_id, 0, at); - while at < haystack.len() { - if let Some(pre) = prefilter { - if prestate.is_effective(at) && *state_id == self.start_state() - { - let c = prefilter::next(prestate, pre, haystack, at) - .into_option(); - match c { - None => return None, - Some(i) => { - at = i; - } - } - } - } - // CORRECTNESS: next_state is correct for all possible u8 values, - // so the only thing we're concerned about is the validity of - // `state_id`. `state_id` either comes from the caller (in which - // case, we assume it is correct), or it comes from the return - // value of next_state, which is guaranteed to be correct. - *state_id = self.next_state_no_fail(*state_id, haystack[at]); - at += 1; - if self.is_match_or_dead_state(*state_id) { - if *state_id == dead_id() { - // The only way to enter into a dead state is if a match - // has been found, so we assert as much. This is different - // from normal automata, where you might enter a dead state - // if you know a subsequent match will never be found - // (regardless of whether a match has already been found). - // For Aho-Corasick, it is built so that we can match at - // any position, so the possibility of a match always - // exists. - // - // (Unless we have an anchored automaton, in which case, - // dead states are used to stop a search.) 
- debug_assert!( - last_match.is_some() || self.anchored(), - "dead state should only be seen after match" - ); - return last_match; - } - last_match = self.get_match(*state_id, 0, at); - } - } - last_match - } - - /// This is like leftmost_find_at, but does not need to track a caller - /// provided state id. In other words, the only output of this routine is a - /// match, if one exists. - /// - /// It is regrettable that we need to effectively copy a chunk of - /// implementation twice, but when we don't need to track the state ID, we - /// can allow the prefilter to report matches immediately without having - /// to re-confirm them with the automaton. The re-confirmation step is - /// necessary in leftmost_find_at because tracing through the automaton is - /// the only way to correctly set the state ID. (Perhaps an alternative - /// would be to keep a map from pattern ID to matching state ID, but that - /// complicates the code and still doesn't permit us to defer to the - /// prefilter entirely when possible.) - /// - /// I did try a few things to avoid the code duplication here, but nothing - /// optimized as well as this approach. (In microbenchmarks, there was - /// about a 25% difference.) - #[inline(never)] - fn leftmost_find_at_no_state( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Option { - if let Some(pre) = self.prefilter() { - self.leftmost_find_at_no_state_imp( - prestate, - Some(pre), - haystack, - at, - ) - } else { - self.leftmost_find_at_no_state_imp(prestate, None, haystack, at) - } - } - - // It's important for this to always be inlined. Namely, its only caller - // is leftmost_find_at_no_state, and the inlining should remove the case - // analysis for prefilter scanning when there is no prefilter available. 
- #[inline(always)] - fn leftmost_find_at_no_state_imp( - &self, - prestate: &mut PrefilterState, - prefilter: Option<&dyn Prefilter>, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(self.match_kind().is_leftmost()); - if self.anchored() && at > 0 { - return None; - } - // If our prefilter handles confirmation of matches 100% of the - // time, and since we don't need to track state IDs, we can avoid - // Aho-Corasick completely. - if let Some(pre) = prefilter { - // We should never have a prefilter during an anchored search. - debug_assert!(!self.anchored()); - if !pre.reports_false_positives() { - return match pre.next_candidate(prestate, haystack, at) { - Candidate::None => None, - Candidate::Match(m) => Some(m), - Candidate::PossibleStartOfMatch(_) => unreachable!(), - }; - } - } - - let mut state_id = self.start_state(); - let mut last_match = self.get_match(state_id, 0, at); - while at < haystack.len() { - if let Some(pre) = prefilter { - if prestate.is_effective(at) && state_id == self.start_state() - { - match prefilter::next(prestate, pre, haystack, at) { - Candidate::None => return None, - // Since we aren't tracking a state ID, we can - // quit early once we know we have a match. - Candidate::Match(m) => return Some(m), - Candidate::PossibleStartOfMatch(i) => { - at = i; - } - } - } - } - // CORRECTNESS: next_state is correct for all possible u8 values, - // so the only thing we're concerned about is the validity of - // `state_id`. `state_id` either comes from the caller (in which - // case, we assume it is correct), or it comes from the return - // value of next_state, which is guaranteed to be correct. - state_id = self.next_state_no_fail(state_id, haystack[at]); - at += 1; - if self.is_match_or_dead_state(state_id) { - if state_id == dead_id() { - // The only way to enter into a dead state is if a - // match has been found, so we assert as much. 
This - // is different from normal automata, where you might - // enter a dead state if you know a subsequent match - // will never be found (regardless of whether a match - // has already been found). For Aho-Corasick, it is - // built so that we can match at any position, so the - // possibility of a match always exists. - // - // (Unless we have an anchored automaton, in which - // case, dead states are used to stop a search.) - debug_assert!( - last_match.is_some() || self.anchored(), - "dead state should only be seen after match" - ); - return last_match; - } - last_match = self.get_match(state_id, 0, at); - } - } - last_match - } - - /// Execute an overlapping search. - /// - /// When executing an overlapping match, the previous state ID in addition - /// to the previous match index should be given. If there are more matches - /// at the given state, then the match is reported and the given index is - /// incremented. - #[inline(always)] - fn overlapping_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut Self::ID, - match_index: &mut usize, - ) -> Option { - if self.anchored() && at > 0 && *state_id == self.start_state() { - return None; - } - - let match_count = self.match_count(*state_id); - if *match_index < match_count { - // This is guaranteed to return a match since - // match_index < match_count. - let result = self.get_match(*state_id, *match_index, at); - debug_assert!(result.is_some(), "must be a match"); - *match_index += 1; - return result; - } - - *match_index = 0; - match self.standard_find_at(prestate, haystack, at, state_id) { - None => None, - Some(m) => { - *match_index = 1; - Some(m) - } - } - } - - /// Return the earliest match found. This returns as soon as we know that - /// we have a match. As such, this does not necessarily correspond to the - /// leftmost starting match, but rather, the leftmost position at which a - /// match ends. 
- #[inline(always)] - fn earliest_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut Self::ID, - ) -> Option { - if *state_id == self.start_state() { - if self.anchored() && at > 0 { - return None; - } - if let Some(m) = self.get_match(*state_id, 0, at) { - return Some(m); - } - } - self.standard_find_at(prestate, haystack, at, state_id) - } - - /// A convenience function for finding the next match according to the - /// match semantics of this automaton. For standard match semantics, this - /// finds the earliest match. Otherwise, the leftmost match is found. - #[inline(always)] - fn find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut Self::ID, - ) -> Option { - match *self.match_kind() { - MatchKind::Standard => { - self.earliest_find_at(prestate, haystack, at, state_id) - } - MatchKind::LeftmostFirst | MatchKind::LeftmostLongest => { - self.leftmost_find_at(prestate, haystack, at, state_id) - } - MatchKind::__Nonexhaustive => unreachable!(), - } - } - - /// Like find_at, but does not track state identifiers. This permits some - /// optimizations when a prefilter that confirms its own matches is - /// present. 
- #[inline(always)] - fn find_at_no_state( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Option { - match *self.match_kind() { - MatchKind::Standard => { - let mut state = self.start_state(); - self.earliest_find_at(prestate, haystack, at, &mut state) - } - MatchKind::LeftmostFirst | MatchKind::LeftmostLongest => { - self.leftmost_find_at_no_state(prestate, haystack, at) - } - MatchKind::__Nonexhaustive => unreachable!(), - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/buffer.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/buffer.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/buffer.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/buffer.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -use std::cmp; -use std::io; -use std::ptr; - -/// The default buffer capacity that we use for the stream buffer. -const DEFAULT_BUFFER_CAPACITY: usize = 8 * (1 << 10); // 8 KB - -/// A fairly simple roll buffer for supporting stream searches. -/// -/// This buffer acts as a temporary place to store a fixed amount of data when -/// reading from a stream. Its central purpose is to allow "rolling" some -/// suffix of the data to the beginning of the buffer before refilling it with -/// more data from the stream. For example, let's say we are trying to match -/// "foobar" on a stream. When we report the match, we'd like to not only -/// report the correct offsets at which the match occurs, but also the matching -/// bytes themselves. So let's say our stream is a file with the following -/// contents: `test test foobar test test`. Now assume that we happen to read -/// the aforementioned file in two chunks: `test test foo` and `bar test test`. -/// Naively, it would not be possible to report a single contiguous `foobar` -/// match, but this roll buffer allows us to do that. 
Namely, after the second -/// read, the contents of the buffer should be `st foobar test test`, where the -/// search should ultimately resume immediately after `foo`. (The prefix `st ` -/// is included because the roll buffer saves N bytes at the end of the buffer, -/// where N is the maximum possible length of a match.) -/// -/// A lot of the logic for dealing with this is unfortunately split out between -/// this roll buffer and the `StreamChunkIter`. -#[derive(Debug)] -pub struct Buffer { - /// The raw buffer contents. This has a fixed size and never increases. - buf: Vec, - /// The minimum size of the buffer, which is equivalent to the maximum - /// possible length of a match. This corresponds to the amount that we - /// roll - min: usize, - /// The end of the contents of this buffer. - end: usize, -} - -impl Buffer { - /// Create a new buffer for stream searching. The minimum buffer length - /// given should be the size of the maximum possible match length. - pub fn new(min_buffer_len: usize) -> Buffer { - let min = cmp::max(1, min_buffer_len); - // The minimum buffer amount is also the amount that we roll our - // buffer in order to support incremental searching. To this end, - // our actual capacity needs to be at least 1 byte bigger than our - // minimum amount, otherwise we won't have any overlap. In actuality, - // we want our buffer to be a bit bigger than that for performance - // reasons, so we set a lower bound of `8 * min`. - // - // TODO: It would be good to find a way to test the streaming - // implementation with the minimal buffer size. For now, we just - // uncomment out the next line and comment out the subsequent line. - // let capacity = 1 + min; - let capacity = cmp::max(min * 8, DEFAULT_BUFFER_CAPACITY); - Buffer { buf: vec![0; capacity], min, end: 0 } - } - - /// Return the contents of this buffer. - #[inline] - pub fn buffer(&self) -> &[u8] { - &self.buf[..self.end] - } - - /// Return the minimum size of the buffer. 
The only way a buffer may be - /// smaller than this is if the stream itself contains less than the - /// minimum buffer amount. - #[inline] - pub fn min_buffer_len(&self) -> usize { - self.min - } - - /// Return the total length of the contents in the buffer. - #[inline] - pub fn len(&self) -> usize { - self.end - } - - /// Return all free capacity in this buffer. - fn free_buffer(&mut self) -> &mut [u8] { - &mut self.buf[self.end..] - } - - /// Refill the contents of this buffer by reading as much as possible into - /// this buffer's free capacity. If no more bytes could be read, then this - /// returns false. Otherwise, this reads until it has filled the buffer - /// past the minimum amount. - pub fn fill(&mut self, mut rdr: R) -> io::Result { - let mut readany = false; - loop { - let readlen = rdr.read(self.free_buffer())?; - if readlen == 0 { - return Ok(readany); - } - readany = true; - self.end += readlen; - if self.len() >= self.min { - return Ok(true); - } - } - } - - /// Roll the contents of the buffer so that the suffix of this buffer is - /// moved to the front and all other contents are dropped. The size of the - /// suffix corresponds precisely to the minimum buffer length. - /// - /// This should only be called when the entire contents of this buffer have - /// been searched. - pub fn roll(&mut self) { - let roll_start = self - .end - .checked_sub(self.min) - .expect("buffer capacity should be bigger than minimum amount"); - let roll_len = self.min; - - assert!(roll_start + roll_len <= self.end); - unsafe { - // SAFETY: A buffer contains Copy data, so there's no problem - // moving it around. Safety also depends on our indices being in - // bounds, which they always should be, given the assert above. - // - // TODO: Switch to [T]::copy_within once our MSRV is high enough. 
- ptr::copy( - self.buf[roll_start..].as_ptr(), - self.buf.as_mut_ptr(), - roll_len, - ); - } - self.end = roll_len; - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/byte_frequencies.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/byte_frequencies.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/byte_frequencies.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/byte_frequencies.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -pub const BYTE_FREQUENCIES: [u8; 256] = [ - 55, // '\x00' - 52, // '\x01' - 51, // '\x02' - 50, // '\x03' - 49, // '\x04' - 48, // '\x05' - 47, // '\x06' - 46, // '\x07' - 45, // '\x08' - 103, // '\t' - 242, // '\n' - 66, // '\x0b' - 67, // '\x0c' - 229, // '\r' - 44, // '\x0e' - 43, // '\x0f' - 42, // '\x10' - 41, // '\x11' - 40, // '\x12' - 39, // '\x13' - 38, // '\x14' - 37, // '\x15' - 36, // '\x16' - 35, // '\x17' - 34, // '\x18' - 33, // '\x19' - 56, // '\x1a' - 32, // '\x1b' - 31, // '\x1c' - 30, // '\x1d' - 29, // '\x1e' - 28, // '\x1f' - 255, // ' ' - 148, // '!' - 164, // '"' - 149, // '#' - 136, // '$' - 160, // '%' - 155, // '&' - 173, // "'" - 221, // '(' - 222, // ')' - 134, // '*' - 122, // '+' - 232, // ',' - 202, // '-' - 215, // '.' - 224, // '/' - 208, // '0' - 220, // '1' - 204, // '2' - 187, // '3' - 183, // '4' - 179, // '5' - 177, // '6' - 168, // '7' - 178, // '8' - 200, // '9' - 226, // ':' - 195, // ';' - 154, // '<' - 184, // '=' - 174, // '>' - 126, // '?' 
- 120, // '@' - 191, // 'A' - 157, // 'B' - 194, // 'C' - 170, // 'D' - 189, // 'E' - 162, // 'F' - 161, // 'G' - 150, // 'H' - 193, // 'I' - 142, // 'J' - 137, // 'K' - 171, // 'L' - 176, // 'M' - 185, // 'N' - 167, // 'O' - 186, // 'P' - 112, // 'Q' - 175, // 'R' - 192, // 'S' - 188, // 'T' - 156, // 'U' - 140, // 'V' - 143, // 'W' - 123, // 'X' - 133, // 'Y' - 128, // 'Z' - 147, // '[' - 138, // '\\' - 146, // ']' - 114, // '^' - 223, // '_' - 151, // '`' - 249, // 'a' - 216, // 'b' - 238, // 'c' - 236, // 'd' - 253, // 'e' - 227, // 'f' - 218, // 'g' - 230, // 'h' - 247, // 'i' - 135, // 'j' - 180, // 'k' - 241, // 'l' - 233, // 'm' - 246, // 'n' - 244, // 'o' - 231, // 'p' - 139, // 'q' - 245, // 'r' - 243, // 's' - 251, // 't' - 235, // 'u' - 201, // 'v' - 196, // 'w' - 240, // 'x' - 214, // 'y' - 152, // 'z' - 182, // '{' - 205, // '|' - 181, // '}' - 127, // '~' - 27, // '\x7f' - 212, // '\x80' - 211, // '\x81' - 210, // '\x82' - 213, // '\x83' - 228, // '\x84' - 197, // '\x85' - 169, // '\x86' - 159, // '\x87' - 131, // '\x88' - 172, // '\x89' - 105, // '\x8a' - 80, // '\x8b' - 98, // '\x8c' - 96, // '\x8d' - 97, // '\x8e' - 81, // '\x8f' - 207, // '\x90' - 145, // '\x91' - 116, // '\x92' - 115, // '\x93' - 144, // '\x94' - 130, // '\x95' - 153, // '\x96' - 121, // '\x97' - 107, // '\x98' - 132, // '\x99' - 109, // '\x9a' - 110, // '\x9b' - 124, // '\x9c' - 111, // '\x9d' - 82, // '\x9e' - 108, // '\x9f' - 118, // '\xa0' - 141, // '¡' - 113, // '¢' - 129, // '£' - 119, // '¤' - 125, // '¥' - 165, // '¦' - 117, // '§' - 92, // '¨' - 106, // '©' - 83, // 'ª' - 72, // '«' - 99, // '¬' - 93, // '\xad' - 65, // '®' - 79, // '¯' - 166, // '°' - 237, // '±' - 163, // '²' - 199, // '³' - 190, // '´' - 225, // 'µ' - 209, // '¶' - 203, // '·' - 198, // '¸' - 217, // '¹' - 219, // 'º' - 206, // '»' - 234, // '¼' - 248, // '½' - 158, // '¾' - 239, // '¿' - 255, // 'À' - 255, // 'Á' - 255, // 'Â' - 255, // 'Ã' - 255, // 'Ä' - 255, // 'Å' - 255, // 'Æ' - 255, // 'Ç' - 
255, // 'È' - 255, // 'É' - 255, // 'Ê' - 255, // 'Ë' - 255, // 'Ì' - 255, // 'Í' - 255, // 'Î' - 255, // 'Ï' - 255, // 'Ð' - 255, // 'Ñ' - 255, // 'Ò' - 255, // 'Ó' - 255, // 'Ô' - 255, // 'Õ' - 255, // 'Ö' - 255, // '×' - 255, // 'Ø' - 255, // 'Ù' - 255, // 'Ú' - 255, // 'Û' - 255, // 'Ü' - 255, // 'Ý' - 255, // 'Þ' - 255, // 'ß' - 255, // 'à' - 255, // 'á' - 255, // 'â' - 255, // 'ã' - 255, // 'ä' - 255, // 'å' - 255, // 'æ' - 255, // 'ç' - 255, // 'è' - 255, // 'é' - 255, // 'ê' - 255, // 'ë' - 255, // 'ì' - 255, // 'í' - 255, // 'î' - 255, // 'ï' - 255, // 'ð' - 255, // 'ñ' - 255, // 'ò' - 255, // 'ó' - 255, // 'ô' - 255, // 'õ' - 255, // 'ö' - 255, // '÷' - 255, // 'ø' - 255, // 'ù' - 255, // 'ú' - 255, // 'û' - 255, // 'ü' - 255, // 'ý' - 255, // 'þ' - 255, // 'ÿ' -]; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/classes.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/classes.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/classes.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/classes.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,238 +0,0 @@ -use std::fmt; - -/// A representation of byte oriented equivalence classes. -/// -/// This is used in an FSM to reduce the size of the transition table. This can -/// have a particularly large impact not only on the total size of an FSM, but -/// also on compile times. -#[derive(Clone, Copy)] -pub struct ByteClasses([u8; 256]); - -impl ByteClasses { - /// Creates a new set of equivalence classes where all bytes are mapped to - /// the same class. - pub fn empty() -> ByteClasses { - ByteClasses([0; 256]) - } - - /// Creates a new set of equivalence classes where each byte belongs to - /// its own equivalence class. 
- pub fn singletons() -> ByteClasses { - let mut classes = ByteClasses::empty(); - for i in 0..256 { - classes.set(i as u8, i as u8); - } - classes - } - - /// Set the equivalence class for the given byte. - #[inline] - pub fn set(&mut self, byte: u8, class: u8) { - self.0[byte as usize] = class; - } - - /// Get the equivalence class for the given byte. - #[inline] - pub fn get(&self, byte: u8) -> u8 { - // SAFETY: This is safe because all dense transitions have - // exactly 256 elements, so all u8 values are valid indices. - self.0[byte as usize] - } - - /// Return the total number of elements in the alphabet represented by - /// these equivalence classes. Equivalently, this returns the total number - /// of equivalence classes. - #[inline] - pub fn alphabet_len(&self) -> usize { - self.0[255] as usize + 1 - } - - /// Returns true if and only if every byte in this class maps to its own - /// equivalence class. Equivalently, there are 256 equivalence classes - /// and each class contains exactly one byte. - #[inline] - pub fn is_singleton(&self) -> bool { - self.alphabet_len() == 256 - } - - /// Returns an iterator over a sequence of representative bytes from each - /// equivalence class. Namely, this yields exactly N items, where N is - /// equivalent to the number of equivalence classes. Each item is an - /// arbitrary byte drawn from each equivalence class. - /// - /// This is useful when one is determinizing an NFA and the NFA's alphabet - /// hasn't been converted to equivalence classes yet. Picking an arbitrary - /// byte from each equivalence class then permits a full exploration of - /// the NFA instead of using every possible byte value. - pub fn representatives(&self) -> ByteClassRepresentatives<'_> { - ByteClassRepresentatives { classes: self, byte: 0, last_class: None } - } - - /// Returns all of the bytes in the given equivalence class. - /// - /// The second element in the tuple indicates the number of elements in - /// the array. 
- fn elements(&self, equiv: u8) -> ([u8; 256], usize) { - let (mut array, mut len) = ([0; 256], 0); - for b in 0..256 { - if self.get(b as u8) == equiv { - array[len] = b as u8; - len += 1; - } - } - (array, len) - } -} - -impl fmt::Debug for ByteClasses { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.is_singleton() { - write!(f, "ByteClasses({{singletons}})") - } else { - write!(f, "ByteClasses(")?; - for equiv in 0..self.alphabet_len() { - let (members, len) = self.elements(equiv as u8); - write!(f, " {} => {:?}", equiv, &members[..len])?; - } - write!(f, ")") - } - } -} - -/// An iterator over representative bytes from each equivalence class. -#[derive(Debug)] -pub struct ByteClassRepresentatives<'a> { - classes: &'a ByteClasses, - byte: usize, - last_class: Option, -} - -impl<'a> Iterator for ByteClassRepresentatives<'a> { - type Item = u8; - - fn next(&mut self) -> Option { - while self.byte < 256 { - let byte = self.byte as u8; - let class = self.classes.get(byte); - self.byte += 1; - - if self.last_class != Some(class) { - self.last_class = Some(class); - return Some(byte); - } - } - None - } -} - -/// A byte class builder keeps track of an *approximation* of equivalence -/// classes of bytes during NFA construction. That is, every byte in an -/// equivalence class cannot discriminate between a match and a non-match. -/// -/// For example, in the literals `abc` and `xyz`, the bytes [\x00-`], [d-w] -/// and [{-\xFF] never discriminate between a match and a non-match, precisely -/// because they never occur in the literals anywhere. -/// -/// Note though that this does not necessarily compute the minimal set of -/// equivalence classes. For example, in the literals above, the byte ranges -/// [\x00-`], [d-w] and [{-\xFF] are all treated as distinct equivalence -/// classes even though they could be treated a single class. The reason for -/// this is implementation complexity. 
In the future, we should endeavor to -/// compute the minimal equivalence classes since they can have a rather large -/// impact on the size of the DFA. -/// -/// The representation here is 256 booleans, all initially set to false. Each -/// boolean maps to its corresponding byte based on position. A `true` value -/// indicates the end of an equivalence class, where its corresponding byte -/// and all of the bytes corresponding to all previous contiguous `false` -/// values are in the same equivalence class. -/// -/// This particular representation only permits contiguous ranges of bytes to -/// be in the same equivalence class, which means that we can never discover -/// the true minimal set of equivalence classes. -#[derive(Debug)] -pub struct ByteClassBuilder(Vec); - -impl ByteClassBuilder { - /// Create a new builder of byte classes where all bytes are part of the - /// same equivalence class. - pub fn new() -> ByteClassBuilder { - ByteClassBuilder(vec![false; 256]) - } - - /// Indicate the the range of byte given (inclusive) can discriminate a - /// match between it and all other bytes outside of the range. - pub fn set_range(&mut self, start: u8, end: u8) { - debug_assert!(start <= end); - if start > 0 { - self.0[start as usize - 1] = true; - } - self.0[end as usize] = true; - } - - /// Build byte classes that map all byte values to their corresponding - /// equivalence class. The last mapping indicates the largest equivalence - /// class identifier (which is never bigger than 255). 
- pub fn build(&self) -> ByteClasses { - let mut classes = ByteClasses::empty(); - let mut class = 0u8; - let mut i = 0; - loop { - classes.set(i as u8, class as u8); - if i >= 255 { - break; - } - if self.0[i] { - class = class.checked_add(1).unwrap(); - } - i += 1; - } - classes - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn byte_classes() { - let mut set = ByteClassBuilder::new(); - set.set_range(b'a', b'z'); - - let classes = set.build(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(b'a' - 1), 0); - assert_eq!(classes.get(b'a'), 1); - assert_eq!(classes.get(b'm'), 1); - assert_eq!(classes.get(b'z'), 1); - assert_eq!(classes.get(b'z' + 1), 2); - assert_eq!(classes.get(254), 2); - assert_eq!(classes.get(255), 2); - - let mut set = ByteClassBuilder::new(); - set.set_range(0, 2); - set.set_range(4, 6); - let classes = set.build(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(3), 1); - assert_eq!(classes.get(4), 2); - assert_eq!(classes.get(5), 2); - assert_eq!(classes.get(6), 2); - assert_eq!(classes.get(7), 3); - assert_eq!(classes.get(255), 3); - } - - #[test] - fn full_byte_classes() { - let mut set = ByteClassBuilder::new(); - for i in 0..256u16 { - set.set_range(i as u8, i as u8); - } - assert_eq!(set.build().alphabet_len(), 256); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/dfa.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/dfa.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/dfa.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/dfa.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,713 +0,0 @@ -use std::mem::size_of; - -use crate::ahocorasick::MatchKind; -use crate::automaton::Automaton; -use crate::classes::ByteClasses; -use crate::error::Result; -use 
crate::nfa::{PatternID, PatternLength, NFA}; -use crate::prefilter::{Prefilter, PrefilterObj, PrefilterState}; -use crate::state_id::{dead_id, fail_id, premultiply_overflow_error, StateID}; -use crate::Match; - -#[derive(Clone, Debug)] -pub enum DFA { - Standard(Standard), - ByteClass(ByteClass), - Premultiplied(Premultiplied), - PremultipliedByteClass(PremultipliedByteClass), -} - -impl DFA { - fn repr(&self) -> &Repr { - match *self { - DFA::Standard(ref dfa) => dfa.repr(), - DFA::ByteClass(ref dfa) => dfa.repr(), - DFA::Premultiplied(ref dfa) => dfa.repr(), - DFA::PremultipliedByteClass(ref dfa) => dfa.repr(), - } - } - - pub fn match_kind(&self) -> &MatchKind { - &self.repr().match_kind - } - - pub fn heap_bytes(&self) -> usize { - self.repr().heap_bytes - } - - pub fn max_pattern_len(&self) -> usize { - self.repr().max_pattern_len - } - - pub fn pattern_count(&self) -> usize { - self.repr().pattern_count - } - - pub fn prefilter(&self) -> Option<&dyn Prefilter> { - self.repr().prefilter.as_ref().map(|p| p.as_ref()) - } - - pub fn start_state(&self) -> S { - self.repr().start_id - } - - #[inline(always)] - pub fn overlapping_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut S, - match_index: &mut usize, - ) -> Option { - match *self { - DFA::Standard(ref dfa) => dfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - DFA::ByteClass(ref dfa) => dfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - DFA::Premultiplied(ref dfa) => dfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - DFA::PremultipliedByteClass(ref dfa) => dfa.overlapping_find_at( - prestate, - haystack, - at, - state_id, - match_index, - ), - } - } - - #[inline(always)] - pub fn earliest_find_at( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - state_id: &mut S, - ) -> Option { - match *self { - DFA::Standard(ref dfa) => 
{ - dfa.earliest_find_at(prestate, haystack, at, state_id) - } - DFA::ByteClass(ref dfa) => { - dfa.earliest_find_at(prestate, haystack, at, state_id) - } - DFA::Premultiplied(ref dfa) => { - dfa.earliest_find_at(prestate, haystack, at, state_id) - } - DFA::PremultipliedByteClass(ref dfa) => { - dfa.earliest_find_at(prestate, haystack, at, state_id) - } - } - } - - #[inline(always)] - pub fn find_at_no_state( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Option { - match *self { - DFA::Standard(ref dfa) => { - dfa.find_at_no_state(prestate, haystack, at) - } - DFA::ByteClass(ref dfa) => { - dfa.find_at_no_state(prestate, haystack, at) - } - DFA::Premultiplied(ref dfa) => { - dfa.find_at_no_state(prestate, haystack, at) - } - DFA::PremultipliedByteClass(ref dfa) => { - dfa.find_at_no_state(prestate, haystack, at) - } - } - } -} - -#[derive(Clone, Debug)] -pub struct Standard(Repr); - -impl Standard { - fn repr(&self) -> &Repr { - &self.0 - } -} - -impl Automaton for Standard { - type ID = S; - - fn match_kind(&self) -> &MatchKind { - &self.repr().match_kind - } - - fn anchored(&self) -> bool { - self.repr().anchored - } - - fn prefilter(&self) -> Option<&dyn Prefilter> { - self.repr().prefilter.as_ref().map(|p| p.as_ref()) - } - - fn start_state(&self) -> S { - self.repr().start_id - } - - fn is_valid(&self, id: S) -> bool { - id.to_usize() < self.repr().state_count - } - - fn is_match_state(&self, id: S) -> bool { - self.repr().is_match_state(id) - } - - fn is_match_or_dead_state(&self, id: S) -> bool { - self.repr().is_match_or_dead_state(id) - } - - fn get_match( - &self, - id: S, - match_index: usize, - end: usize, - ) -> Option { - self.repr().get_match(id, match_index, end) - } - - fn match_count(&self, id: S) -> usize { - self.repr().match_count(id) - } - - fn next_state(&self, current: S, input: u8) -> S { - let o = current.to_usize() * 256 + input as usize; - self.repr().trans[o] - } -} - -#[derive(Clone, Debug)] -pub 
struct ByteClass(Repr); - -impl ByteClass { - fn repr(&self) -> &Repr { - &self.0 - } -} - -impl Automaton for ByteClass { - type ID = S; - - fn match_kind(&self) -> &MatchKind { - &self.repr().match_kind - } - - fn anchored(&self) -> bool { - self.repr().anchored - } - - fn prefilter(&self) -> Option<&dyn Prefilter> { - self.repr().prefilter.as_ref().map(|p| p.as_ref()) - } - - fn start_state(&self) -> S { - self.repr().start_id - } - - fn is_valid(&self, id: S) -> bool { - id.to_usize() < self.repr().state_count - } - - fn is_match_state(&self, id: S) -> bool { - self.repr().is_match_state(id) - } - - fn is_match_or_dead_state(&self, id: S) -> bool { - self.repr().is_match_or_dead_state(id) - } - - fn get_match( - &self, - id: S, - match_index: usize, - end: usize, - ) -> Option { - self.repr().get_match(id, match_index, end) - } - - fn match_count(&self, id: S) -> usize { - self.repr().match_count(id) - } - - fn next_state(&self, current: S, input: u8) -> S { - let alphabet_len = self.repr().byte_classes.alphabet_len(); - let input = self.repr().byte_classes.get(input); - let o = current.to_usize() * alphabet_len + input as usize; - self.repr().trans[o] - } -} - -#[derive(Clone, Debug)] -pub struct Premultiplied(Repr); - -impl Premultiplied { - fn repr(&self) -> &Repr { - &self.0 - } -} - -impl Automaton for Premultiplied { - type ID = S; - - fn match_kind(&self) -> &MatchKind { - &self.repr().match_kind - } - - fn anchored(&self) -> bool { - self.repr().anchored - } - - fn prefilter(&self) -> Option<&dyn Prefilter> { - self.repr().prefilter.as_ref().map(|p| p.as_ref()) - } - - fn start_state(&self) -> S { - self.repr().start_id - } - - fn is_valid(&self, id: S) -> bool { - (id.to_usize() / 256) < self.repr().state_count - } - - fn is_match_state(&self, id: S) -> bool { - self.repr().is_match_state(id) - } - - fn is_match_or_dead_state(&self, id: S) -> bool { - self.repr().is_match_or_dead_state(id) - } - - fn get_match( - &self, - id: S, - match_index: usize, - 
end: usize, - ) -> Option { - if id > self.repr().max_match { - return None; - } - self.repr() - .matches - .get(id.to_usize() / 256) - .and_then(|m| m.get(match_index)) - .map(|&(id, len)| Match { pattern: id, len, end }) - } - - fn match_count(&self, id: S) -> usize { - let o = id.to_usize() / 256; - self.repr().matches[o].len() - } - - fn next_state(&self, current: S, input: u8) -> S { - let o = current.to_usize() + input as usize; - self.repr().trans[o] - } -} - -#[derive(Clone, Debug)] -pub struct PremultipliedByteClass(Repr); - -impl PremultipliedByteClass { - fn repr(&self) -> &Repr { - &self.0 - } -} - -impl Automaton for PremultipliedByteClass { - type ID = S; - - fn match_kind(&self) -> &MatchKind { - &self.repr().match_kind - } - - fn anchored(&self) -> bool { - self.repr().anchored - } - - fn prefilter(&self) -> Option<&dyn Prefilter> { - self.repr().prefilter.as_ref().map(|p| p.as_ref()) - } - - fn start_state(&self) -> S { - self.repr().start_id - } - - fn is_valid(&self, id: S) -> bool { - (id.to_usize() / self.repr().alphabet_len()) < self.repr().state_count - } - - fn is_match_state(&self, id: S) -> bool { - self.repr().is_match_state(id) - } - - fn is_match_or_dead_state(&self, id: S) -> bool { - self.repr().is_match_or_dead_state(id) - } - - fn get_match( - &self, - id: S, - match_index: usize, - end: usize, - ) -> Option { - if id > self.repr().max_match { - return None; - } - self.repr() - .matches - .get(id.to_usize() / self.repr().alphabet_len()) - .and_then(|m| m.get(match_index)) - .map(|&(id, len)| Match { pattern: id, len, end }) - } - - fn match_count(&self, id: S) -> usize { - let o = id.to_usize() / self.repr().alphabet_len(); - self.repr().matches[o].len() - } - - fn next_state(&self, current: S, input: u8) -> S { - let input = self.repr().byte_classes.get(input); - let o = current.to_usize() + input as usize; - self.repr().trans[o] - } -} - -#[derive(Clone, Debug)] -pub struct Repr { - match_kind: MatchKind, - anchored: bool, - 
premultiplied: bool, - start_id: S, - /// The length, in bytes, of the longest pattern in this automaton. This - /// information is useful for keeping correct buffer sizes when searching - /// on streams. - max_pattern_len: usize, - /// The total number of patterns added to this automaton. This includes - /// patterns that may never match. - pattern_count: usize, - state_count: usize, - max_match: S, - /// The number of bytes of heap used by this NFA's transition table. - heap_bytes: usize, - /// A prefilter for quickly detecting candidate matchs, if pertinent. - prefilter: Option, - byte_classes: ByteClasses, - trans: Vec, - matches: Vec>, -} - -impl Repr { - /// Returns the total alphabet size for this DFA. - /// - /// If byte classes are enabled, then this corresponds to the number of - /// equivalence classes. If they are disabled, then this is always 256. - fn alphabet_len(&self) -> usize { - self.byte_classes.alphabet_len() - } - - /// Returns true only if the given state is a match state. - fn is_match_state(&self, id: S) -> bool { - id <= self.max_match && id > dead_id() - } - - /// Returns true only if the given state is either a dead state or a match - /// state. - fn is_match_or_dead_state(&self, id: S) -> bool { - id <= self.max_match - } - - /// Get the ith match for the given state, where the end position of a - /// match was found at `end`. - /// - /// # Panics - /// - /// The caller must ensure that the given state identifier is valid, - /// otherwise this may panic. The `match_index` need not be valid. That is, - /// if the given state has no matches then this returns `None`. - fn get_match( - &self, - id: S, - match_index: usize, - end: usize, - ) -> Option { - if id > self.max_match { - return None; - } - self.matches - .get(id.to_usize()) - .and_then(|m| m.get(match_index)) - .map(|&(id, len)| Match { pattern: id, len, end }) - } - - /// Return the total number of matches for the given state. 
- /// - /// # Panics - /// - /// The caller must ensure that the given identifier is valid, or else - /// this panics. - fn match_count(&self, id: S) -> usize { - self.matches[id.to_usize()].len() - } - - /// Get the next state given `from` as the current state and `byte` as the - /// current input byte. - fn next_state(&self, from: S, byte: u8) -> S { - let alphabet_len = self.alphabet_len(); - let byte = self.byte_classes.get(byte); - self.trans[from.to_usize() * alphabet_len + byte as usize] - } - - /// Set the `byte` transition for the `from` state to point to `to`. - fn set_next_state(&mut self, from: S, byte: u8, to: S) { - let alphabet_len = self.alphabet_len(); - let byte = self.byte_classes.get(byte); - self.trans[from.to_usize() * alphabet_len + byte as usize] = to; - } - - /// Swap the given states in place. - fn swap_states(&mut self, id1: S, id2: S) { - assert!(!self.premultiplied, "can't swap states in premultiplied DFA"); - - let o1 = id1.to_usize() * self.alphabet_len(); - let o2 = id2.to_usize() * self.alphabet_len(); - for b in 0..self.alphabet_len() { - self.trans.swap(o1 + b, o2 + b); - } - self.matches.swap(id1.to_usize(), id2.to_usize()); - } - - /// This routine shuffles all match states in this DFA to the beginning - /// of the DFA such that every non-match state appears after every match - /// state. (With one exception: the special fail and dead states remain as - /// the first two states.) - /// - /// The purpose of doing this shuffling is to avoid an extra conditional - /// in the search loop, and in particular, detecting whether a state is a - /// match or not does not need to access any memory. - /// - /// This updates `self.max_match` to point to the last matching state as - /// well as `self.start` if the starting state was moved. 
- fn shuffle_match_states(&mut self) { - assert!( - !self.premultiplied, - "cannot shuffle match states of premultiplied DFA" - ); - - if self.state_count <= 1 { - return; - } - - let mut first_non_match = self.start_id.to_usize(); - while first_non_match < self.state_count - && self.matches[first_non_match].len() > 0 - { - first_non_match += 1; - } - - let mut swaps: Vec = vec![fail_id(); self.state_count]; - let mut cur = self.state_count - 1; - while cur > first_non_match { - if self.matches[cur].len() > 0 { - self.swap_states( - S::from_usize(cur), - S::from_usize(first_non_match), - ); - swaps[cur] = S::from_usize(first_non_match); - swaps[first_non_match] = S::from_usize(cur); - - first_non_match += 1; - while first_non_match < cur - && self.matches[first_non_match].len() > 0 - { - first_non_match += 1; - } - } - cur -= 1; - } - for id in (0..self.state_count).map(S::from_usize) { - let alphabet_len = self.alphabet_len(); - let offset = id.to_usize() * alphabet_len; - for next in &mut self.trans[offset..offset + alphabet_len] { - if swaps[next.to_usize()] != fail_id() { - *next = swaps[next.to_usize()]; - } - } - } - if swaps[self.start_id.to_usize()] != fail_id() { - self.start_id = swaps[self.start_id.to_usize()]; - } - self.max_match = S::from_usize(first_non_match - 1); - } - - fn premultiply(&mut self) -> Result<()> { - if self.premultiplied || self.state_count <= 1 { - return Ok(()); - } - - let alpha_len = self.alphabet_len(); - premultiply_overflow_error( - S::from_usize(self.state_count - 1), - alpha_len, - )?; - - for id in (2..self.state_count).map(S::from_usize) { - let offset = id.to_usize() * alpha_len; - for next in &mut self.trans[offset..offset + alpha_len] { - if *next == dead_id() { - continue; - } - *next = S::from_usize(next.to_usize() * alpha_len); - } - } - self.premultiplied = true; - self.start_id = S::from_usize(self.start_id.to_usize() * alpha_len); - self.max_match = S::from_usize(self.max_match.to_usize() * alpha_len); - Ok(()) - 
} - - /// Computes the total amount of heap used by this NFA in bytes. - fn calculate_size(&mut self) { - let mut size = (self.trans.len() * size_of::()) - + (self.matches.len() - * size_of::>()); - for state_matches in &self.matches { - size += - state_matches.len() * size_of::<(PatternID, PatternLength)>(); - } - size += self.prefilter.as_ref().map_or(0, |p| p.as_ref().heap_bytes()); - self.heap_bytes = size; - } -} - -/// A builder for configuring the determinization of an NFA into a DFA. -#[derive(Clone, Debug)] -pub struct Builder { - premultiply: bool, - byte_classes: bool, -} - -impl Builder { - /// Create a new builder for a DFA. - pub fn new() -> Builder { - Builder { premultiply: true, byte_classes: true } - } - - /// Build a DFA from the given NFA. - /// - /// This returns an error if the state identifiers exceed their - /// representation size. This can only happen when state ids are - /// premultiplied (which is enabled by default). - pub fn build(&self, nfa: &NFA) -> Result> { - let byte_classes = if self.byte_classes { - nfa.byte_classes().clone() - } else { - ByteClasses::singletons() - }; - let alphabet_len = byte_classes.alphabet_len(); - let trans = vec![fail_id(); alphabet_len * nfa.state_len()]; - let matches = vec![vec![]; nfa.state_len()]; - let mut repr = Repr { - match_kind: nfa.match_kind().clone(), - anchored: nfa.anchored(), - premultiplied: false, - start_id: nfa.start_state(), - max_pattern_len: nfa.max_pattern_len(), - pattern_count: nfa.pattern_count(), - state_count: nfa.state_len(), - max_match: fail_id(), - heap_bytes: 0, - prefilter: nfa.prefilter_obj().map(|p| p.clone()), - byte_classes: byte_classes.clone(), - trans, - matches, - }; - for id in (0..nfa.state_len()).map(S::from_usize) { - repr.matches[id.to_usize()].extend_from_slice(nfa.matches(id)); - - let fail = nfa.failure_transition(id); - nfa.iter_all_transitions(&byte_classes, id, |b, mut next| { - if next == fail_id() { - next = nfa_next_state_memoized(nfa, &repr, id, 
fail, b); - } - repr.set_next_state(id, b, next); - }); - } - repr.shuffle_match_states(); - repr.calculate_size(); - if self.premultiply { - repr.premultiply()?; - if byte_classes.is_singleton() { - Ok(DFA::Premultiplied(Premultiplied(repr))) - } else { - Ok(DFA::PremultipliedByteClass(PremultipliedByteClass(repr))) - } - } else { - if byte_classes.is_singleton() { - Ok(DFA::Standard(Standard(repr))) - } else { - Ok(DFA::ByteClass(ByteClass(repr))) - } - } - } - - /// Whether to use byte classes or in the DFA. - pub fn byte_classes(&mut self, yes: bool) -> &mut Builder { - self.byte_classes = yes; - self - } - - /// Whether to premultiply state identifier in the DFA. - pub fn premultiply(&mut self, yes: bool) -> &mut Builder { - self.premultiply = yes; - self - } -} - -/// This returns the next NFA transition (including resolving failure -/// transitions), except once it sees a state id less than the id of the DFA -/// state that is currently being populated, then we no longer need to follow -/// failure transitions and can instead query the pre-computed state id from -/// the DFA itself. -/// -/// In general, this should only be called when a failure transition is seen. 
-fn nfa_next_state_memoized( - nfa: &NFA, - dfa: &Repr, - populating: S, - mut current: S, - input: u8, -) -> S { - loop { - if current < populating { - return dfa.next_state(current, input); - } - let next = nfa.next_state(current, input); - if next != fail_id() { - return next; - } - current = nfa.failure_transition(current); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/error.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/error.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/error.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/error.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -use std::error; -use std::fmt; -use std::result; - -pub type Result = result::Result; - -/// An error that occurred during the construction of an Aho-Corasick -/// automaton. -#[derive(Clone, Debug)] -pub struct Error { - kind: ErrorKind, -} - -/// The kind of error that occurred. -#[derive(Clone, Debug)] -pub enum ErrorKind { - /// An error that occurs when constructing an automaton would require the - /// use of a state ID that overflows the chosen state ID representation. - /// For example, if one is using `u8` for state IDs and builds a DFA with - /// 257 states, then the last state's ID will be `256` which cannot be - /// represented with `u8`. - StateIDOverflow { - /// The maximum possible state ID. - max: usize, - }, - /// An error that occurs when premultiplication of state IDs is requested - /// when constructing an Aho-Corasick DFA, but doing so would overflow the - /// chosen state ID representation. - /// - /// When `max == requested_max`, then the state ID would overflow `usize`. - PremultiplyOverflow { - /// The maximum possible state id. - max: usize, - /// The maximum ID required by premultiplication. - requested_max: usize, - }, -} - -impl Error { - /// Return the kind of this error. 
- pub fn kind(&self) -> &ErrorKind { - &self.kind - } - - pub(crate) fn state_id_overflow(max: usize) -> Error { - Error { kind: ErrorKind::StateIDOverflow { max } } - } - - pub(crate) fn premultiply_overflow( - max: usize, - requested_max: usize, - ) -> Error { - Error { kind: ErrorKind::PremultiplyOverflow { max, requested_max } } - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match self.kind { - ErrorKind::StateIDOverflow { .. } => { - "state id representation too small" - } - ErrorKind::PremultiplyOverflow { .. } => { - "state id representation too small for premultiplication" - } - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.kind { - ErrorKind::StateIDOverflow { max } => write!( - f, - "building the automaton failed because it required \ - building more states that can be identified, where the \ - maximum ID for the chosen representation is {}", - max, - ), - ErrorKind::PremultiplyOverflow { max, requested_max } => { - if max == requested_max { - write!( - f, - "premultiplication of states requires the ability to \ - represent a state ID greater than what can fit on \ - this platform's usize, which is {}", - ::std::usize::MAX, - ) - } else { - write!( - f, - "premultiplication of states requires the ability to \ - represent at least a state ID of {}, but the chosen \ - representation only permits a maximum state ID of {}", - requested_max, max, - ) - } - } - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/lib.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,303 +0,0 @@ -/*! -A library for finding occurrences of many patterns at once. 
This library -provides multiple pattern search principally through an implementation of the -[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), -which builds a fast finite state machine for executing searches in linear time. - -Additionally, this library provides a number of configuration options for -building the automaton that permit controlling the space versus time trade -off. Other features include simple ASCII case insensitive matching, finding -overlapping matches, replacements, searching streams and even searching and -replacing text in streams. - -Finally, unlike all other (known) Aho-Corasick implementations, this one -supports enabling -[leftmost-first](enum.MatchKind.html#variant.LeftmostFirst) -or -[leftmost-longest](enum.MatchKind.html#variant.LeftmostFirst) -match semantics, using a (seemingly) novel alternative construction algorithm. -For more details on what match semantics means, see the -[`MatchKind`](enum.MatchKind.html) -type. - -# Overview - -This section gives a brief overview of the primary types in this crate: - -* [`AhoCorasick`](struct.AhoCorasick.html) is the primary type and represents - an Aho-Corasick automaton. This is the type you use to execute searches. -* [`AhoCorasickBuilder`](struct.AhoCorasickBuilder.html) can be used to build - an Aho-Corasick automaton, and supports configuring a number of options. -* [`Match`](struct.Match.html) represents a single match reported by an - Aho-Corasick automaton. Each match has two pieces of information: the pattern - that matched and the start and end byte offsets corresponding to the position - in the haystack at which it matched. - -Additionally, the [`packed`](packed/index.html) sub-module contains a lower -level API for using fast vectorized routines for finding a small number of -patterns in a haystack. - -# Example: basic searching - -This example shows how to search for occurrences of multiple patterns -simultaneously. 
Each match includes the pattern that matched along with the -byte offsets of the match. - -``` -use aho_corasick::AhoCorasick; - -let patterns = &["apple", "maple", "Snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::new(patterns); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (1, 13, 18), - (0, 28, 33), - (2, 43, 50), -]); -``` - -# Example: case insensitivity - -This is like the previous example, but matches `Snapple` case insensitively -using `AhoCorasickBuilder`: - -``` -use aho_corasick::AhoCorasickBuilder; - -let patterns = &["apple", "maple", "snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasickBuilder::new() - .ascii_case_insensitive(true) - .build(patterns); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (1, 13, 18), - (0, 28, 33), - (2, 43, 50), -]); -``` - -# Example: replacing matches in a stream - -This example shows how to execute a search and replace on a stream without -loading the entire stream into memory first. - -``` -use aho_corasick::AhoCorasick; - -# fn example() -> Result<(), ::std::io::Error> { -let patterns = &["fox", "brown", "quick"]; -let replace_with = &["sloth", "grey", "slow"]; - -// In a real example, these might be `std::fs::File`s instead. All you need to -// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. 
-let rdr = "The quick brown fox."; -let mut wtr = vec![]; - -let ac = AhoCorasick::new(patterns); -ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)?; -assert_eq!(b"The slow grey sloth.".to_vec(), wtr); -# Ok(()) }; example().unwrap() -``` - -# Example: finding the leftmost first match - -In the textbook description of Aho-Corasick, its formulation is typically -structured such that it reports all possible matches, even when they overlap -with another. In many cases, overlapping matches may not be desired, such as -the case of finding all successive non-overlapping matches like you might with -a standard regular expression. - -Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do -this doesn't always work in the expected way, since it will report matches as -soon as they are seen. For example, consider matching the regex `Samwise|Sam` -against the text `Samwise`. Most regex engines (that are Perl-like, or -non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick -algorithm modified for reporting non-overlapping matches will report `Sam`. - -A novel contribution of this library is the ability to change the match -semantics of Aho-Corasick (without additional search time overhead) such that -`Samwise` is reported instead. 
For example, here's the standard approach: - -``` -use aho_corasick::AhoCorasick; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::new(patterns); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Sam", &haystack[mat.start()..mat.end()]); -``` - -And now here's the leftmost-first version, which matches how a Perl-like -regex will work: - -``` -use aho_corasick::{AhoCorasickBuilder, MatchKind}; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostFirst) - .build(patterns); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); -``` - -In addition to leftmost-first semantics, this library also supports -leftmost-longest semantics, which match the POSIX behavior of a regular -expression alternation. See -[`MatchKind`](enum.MatchKind.html) -for more details. - -# Prefilters - -While an Aho-Corasick automaton can perform admirably when compared to more -naive solutions, it is generally slower than more specialized algorithms that -are accelerated using vector instructions such as SIMD. - -For that reason, this library will internally use a "prefilter" to attempt -to accelerate searches when possible. Currently, this library has several -different algorithms it might use depending on the patterns provided. Once the -number of patterns gets too big, prefilters are no longer used. - -While a prefilter is generally good to have on by default since it works -well in the common case, it can lead to less predictable or even sub-optimal -performance in some cases. For that reason, prefilters can be explicitly -disabled via -[`AhoCorasickBuilder::prefilter`](struct.AhoCorasickBuilder.html#method.prefilter). -*/ - -#![deny(missing_docs)] - -// We can never be truly no_std, but we could be alloc-only some day, so -// require the std feature for now. 
-#[cfg(not(feature = "std"))] -compile_error!("`std` feature is currently required to build this crate"); - -// #[cfg(doctest)] -// #[macro_use] -// extern crate doc_comment; - -// #[cfg(doctest)] -// doctest!("../README.md"); - -pub use crate::ahocorasick::{ - AhoCorasick, AhoCorasickBuilder, FindIter, FindOverlappingIter, MatchKind, - StreamFindIter, -}; -pub use crate::error::{Error, ErrorKind}; -pub use crate::state_id::StateID; - -mod ahocorasick; -mod automaton; -mod buffer; -mod byte_frequencies; -mod classes; -mod dfa; -mod error; -mod nfa; -pub mod packed; -mod prefilter; -mod state_id; -#[cfg(test)] -mod tests; - -/// A representation of a match reported by an Aho-Corasick automaton. -/// -/// A match has two essential pieces of information: the identifier of the -/// pattern that matched, along with the start and end offsets of the match -/// in the haystack. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use aho_corasick::AhoCorasick; -/// -/// let ac = AhoCorasick::new(&[ -/// "foo", "bar", "baz", -/// ]); -/// let mat = ac.find("xxx bar xxx").expect("should have a match"); -/// assert_eq!(1, mat.pattern()); -/// assert_eq!(4, mat.start()); -/// assert_eq!(7, mat.end()); -/// ``` -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct Match { - /// The pattern id. - pattern: usize, - /// The length of this match, such that the starting position of the match - /// is `end - len`. - /// - /// We use length here because, other than the pattern id, the only - /// information about each pattern that the automaton stores is its length. - /// So using the length here is just a bit more natural. But it isn't - /// technically required. - len: usize, - /// The end offset of the match, exclusive. - end: usize, -} - -impl Match { - /// Returns the identifier of the pattern that matched. - /// - /// The identifier of a pattern is derived from the position in which it - /// was originally inserted into the corresponding automaton. 
The first - /// pattern has identifier `0`, and each subsequent pattern is `1`, `2` - /// and so on. - #[inline] - pub fn pattern(&self) -> usize { - self.pattern - } - - /// The starting position of the match. - #[inline] - pub fn start(&self) -> usize { - self.end - self.len - } - - /// The ending position of the match. - #[inline] - pub fn end(&self) -> usize { - self.end - } - - /// The length, in bytes, of the match. - #[inline] - pub fn len(&self) -> usize { - self.len - } - - /// Returns true if and only if this match is empty. That is, when - /// `start() == end()`. - /// - /// An empty match can only be returned when the empty string was among - /// the patterns used to build the Aho-Corasick automaton. - #[inline] - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - #[inline] - fn increment(&self, by: usize) -> Match { - Match { pattern: self.pattern, len: self.len, end: self.end + by } - } - - #[inline] - fn from_span(id: usize, start: usize, end: usize) -> Match { - Match { pattern: id, len: end - start, end } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/nfa.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/nfa.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/nfa.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/nfa.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1214 +0,0 @@ -use std::cmp; -use std::collections::{BTreeSet, VecDeque}; -use std::fmt; -use std::mem::size_of; -use std::ops::{Index, IndexMut}; - -use crate::ahocorasick::MatchKind; -use crate::automaton::Automaton; -use crate::classes::{ByteClassBuilder, ByteClasses}; -use crate::error::Result; -use crate::prefilter::{self, opposite_ascii_case, Prefilter, PrefilterObj}; -use crate::state_id::{dead_id, fail_id, usize_to_state_id, StateID}; -use crate::Match; - -/// The identifier for a pattern, which is simply the position of the pattern -/// in the sequence 
of patterns given by the caller. -pub type PatternID = usize; - -/// The length of a pattern, in bytes. -pub type PatternLength = usize; - -/// An Aho-Corasick automaton, represented as an NFA. -/// -/// This is the classical formulation of Aho-Corasick, which involves building -/// up a prefix trie of a given set of patterns, and then wiring up failure -/// transitions between states in order to guarantee linear time matching. The -/// standard formulation is, technically, an NFA because of these failure -/// transitions. That is, one can see them as enabling the automaton to be in -/// multiple states at once. Indeed, during search, it is possible to check -/// the transitions on multiple states for a single input byte. -/// -/// This particular implementation not only supports the standard style of -/// matching, but also provides a mode for choosing leftmost-first or -/// leftmost-longest match semantics. When a leftmost mode is chosen, some -/// failure transitions that would otherwise be added are elided. See -/// the documentation of `MatchKind` for more details and examples on how the -/// match semantics may differ. -/// -/// If one wants a DFA, then it is necessary to first build an NFA and convert -/// it into a DFA. Note, however, that because we've constrained ourselves to -/// matching literal patterns, this does not need to use subset construction -/// for determinization. Instead, the DFA has at most a number of states -/// equivalent to the number of NFA states. The only real difference between -/// them is that all failure transitions are followed and pre-computed. This -/// uses much more memory, but also executes searches more quickly. -#[derive(Clone)] -pub struct NFA { - /// The match semantics built into this NFA. - match_kind: MatchKind, - /// The start state id as an index into `states`. - start_id: S, - /// The length, in bytes, of the longest pattern in this automaton. 
This - /// information is useful for keeping correct buffer sizes when searching - /// on streams. - max_pattern_len: usize, - /// The total number of patterns added to this automaton, including - /// patterns that may never be matched. - pattern_count: usize, - /// The number of bytes of heap used by this NFA's transition table. - heap_bytes: usize, - /// A prefilter for quickly skipping to candidate matches, if pertinent. - prefilter: Option, - /// Whether this automaton anchors all matches to the start of input. - anchored: bool, - /// A set of equivalence classes in terms of bytes. We compute this while - /// building the NFA, but don't use it in the NFA's states. Instead, we - /// use this for building the DFA. We store it on the NFA since it's easy - /// to compute while visiting the patterns. - byte_classes: ByteClasses, - /// A set of states. Each state defines its own transitions, a fail - /// transition and a set of indices corresponding to matches. - /// - /// The first state is always the fail state, which is used only as a - /// sentinel. Namely, in the final NFA, no transition into the fail state - /// exists. (Well, they do, but they aren't followed. Instead, the state's - /// failure transition is followed.) - /// - /// The second state (index 1) is always the dead state. Dead states are - /// in every automaton, but only used when leftmost-{first,longest} match - /// semantics are enabled. Specifically, they instruct search to stop - /// at specific points in order to report the correct match location. In - /// the standard Aho-Corasick construction, there are no transitions to - /// the dead state. - /// - /// The third state (index 2) is generally intended to be the starting or - /// "root" state. - states: Vec>, -} - -impl NFA { - /// Returns the equivalence classes of bytes found while constructing - /// this NFA. - /// - /// Note that the NFA doesn't actually make use of these equivalence - /// classes. 
Instead, these are useful for building the DFA when desired. - pub fn byte_classes(&self) -> &ByteClasses { - &self.byte_classes - } - - /// Returns a prefilter, if one exists. - pub fn prefilter_obj(&self) -> Option<&PrefilterObj> { - self.prefilter.as_ref() - } - - /// Returns the total number of heap bytes used by this NFA's transition - /// table. - pub fn heap_bytes(&self) -> usize { - self.heap_bytes - + self.prefilter.as_ref().map_or(0, |p| p.as_ref().heap_bytes()) - } - - /// Return the length of the longest pattern in this automaton. - pub fn max_pattern_len(&self) -> usize { - self.max_pattern_len - } - - /// Return the total number of patterns added to this automaton. - pub fn pattern_count(&self) -> usize { - self.pattern_count - } - - /// Returns the total number of states in this NFA. - pub fn state_len(&self) -> usize { - self.states.len() - } - - /// Returns the matches for the given state. - pub fn matches(&self, id: S) -> &[(PatternID, PatternLength)] { - &self.states[id.to_usize()].matches - } - - /// Returns an iterator over all transitions in the given state according - /// to the given equivalence classes, including transitions to `fail_id()`. - /// The number of transitions returned is always equivalent to the number - /// of equivalence classes. - pub fn iter_all_transitions( - &self, - byte_classes: &ByteClasses, - id: S, - f: F, - ) { - self.states[id.to_usize()].trans.iter_all(byte_classes, f); - } - - /// Returns the failure transition for the given state. - pub fn failure_transition(&self, id: S) -> S { - self.states[id.to_usize()].fail - } - - /// Returns the next state for the given state and input byte. - /// - /// Note that this does not follow failure transitions. As such, the id - /// returned may be `fail_id`. 
- pub fn next_state(&self, current: S, input: u8) -> S { - self.states[current.to_usize()].next_state(input) - } - - fn state(&self, id: S) -> &State { - &self.states[id.to_usize()] - } - - fn state_mut(&mut self, id: S) -> &mut State { - &mut self.states[id.to_usize()] - } - - fn start(&self) -> &State { - self.state(self.start_id) - } - - fn start_mut(&mut self) -> &mut State { - let id = self.start_id; - self.state_mut(id) - } - - fn iter_transitions_mut(&mut self, id: S) -> IterTransitionsMut<'_, S> { - IterTransitionsMut::new(self, id) - } - - fn copy_matches(&mut self, src: S, dst: S) { - let (src, dst) = - get_two_mut(&mut self.states, src.to_usize(), dst.to_usize()); - dst.matches.extend_from_slice(&src.matches); - } - - fn copy_empty_matches(&mut self, dst: S) { - let start_id = self.start_id; - self.copy_matches(start_id, dst); - } - - fn add_dense_state(&mut self, depth: usize) -> Result { - let trans = Transitions::Dense(Dense::new()); - let id = usize_to_state_id(self.states.len())?; - self.states.push(State { - trans, - // Anchored automatons do not have any failure transitions. - fail: if self.anchored { dead_id() } else { self.start_id }, - depth, - matches: vec![], - }); - Ok(id) - } - - fn add_sparse_state(&mut self, depth: usize) -> Result { - let trans = Transitions::Sparse(vec![]); - let id = usize_to_state_id(self.states.len())?; - self.states.push(State { - trans, - // Anchored automatons do not have any failure transitions. 
- fail: if self.anchored { dead_id() } else { self.start_id }, - depth, - matches: vec![], - }); - Ok(id) - } -} - -impl Automaton for NFA { - type ID = S; - - fn match_kind(&self) -> &MatchKind { - &self.match_kind - } - - fn anchored(&self) -> bool { - self.anchored - } - - fn prefilter(&self) -> Option<&dyn Prefilter> { - self.prefilter.as_ref().map(|p| p.as_ref()) - } - - fn start_state(&self) -> S { - self.start_id - } - - fn is_valid(&self, id: S) -> bool { - id.to_usize() < self.states.len() - } - - fn is_match_state(&self, id: S) -> bool { - self.states[id.to_usize()].is_match() - } - - fn get_match( - &self, - id: S, - match_index: usize, - end: usize, - ) -> Option { - let state = match self.states.get(id.to_usize()) { - None => return None, - Some(state) => state, - }; - state.matches.get(match_index).map(|&(id, len)| Match { - pattern: id, - len, - end, - }) - } - - fn match_count(&self, id: S) -> usize { - self.states[id.to_usize()].matches.len() - } - - fn next_state(&self, mut current: S, input: u8) -> S { - // This terminates since: - // - // 1. `State.fail` never points to fail_id(). - // 2. All `State.fail` values point to a state closer to `start`. - // 3. The start state has no transitions to fail_id(). - loop { - let state = &self.states[current.to_usize()]; - let next = state.next_state(input); - if next != fail_id() { - return next; - } - current = state.fail; - } - } -} - -/// A representation of an NFA state for an Aho-Corasick automaton. -/// -/// It contains the transitions to the next state, a failure transition for -/// cases where there exists no other transition for the current input byte, -/// the matches implied by visiting this state (if any) and the depth of this -/// state. The depth of a state is simply the distance from it to the start -/// state in the automaton, where the depth of the start state is 0. 
-#[derive(Clone, Debug)] -pub struct State { - trans: Transitions, - fail: S, - matches: Vec<(PatternID, PatternLength)>, - // TODO: Strictly speaking, this isn't needed for searching. It's only - // used when building an NFA that supports leftmost match semantics. We - // could drop this from the state and dynamically build a map only when - // computing failure transitions, but it's not clear which is better. - // Benchmark this. - depth: usize, -} - -impl State { - fn heap_bytes(&self) -> usize { - self.trans.heap_bytes() - + (self.matches.len() * size_of::<(PatternID, PatternLength)>()) - } - - fn add_match(&mut self, i: PatternID, len: PatternLength) { - self.matches.push((i, len)); - } - - fn is_match(&self) -> bool { - !self.matches.is_empty() - } - - fn next_state(&self, input: u8) -> S { - self.trans.next_state(input) - } - - fn set_next_state(&mut self, input: u8, next: S) { - self.trans.set_next_state(input, next); - } -} - -/// Represents the transitions for a single dense state. -/// -/// The primary purpose here is to encapsulate index access. Namely, since a -/// dense representation always contains 256 elements, all values of `u8` are -/// valid indices. -#[derive(Clone, Debug)] -struct Dense(Vec); - -impl Dense -where - S: StateID, -{ - fn new() -> Self { - Dense(vec![fail_id(); 256]) - } - - #[inline] - fn len(&self) -> usize { - self.0.len() - } -} - -impl Index for Dense { - type Output = S; - - #[inline] - fn index(&self, i: u8) -> &S { - // SAFETY: This is safe because all dense transitions have - // exactly 256 elements, so all u8 values are valid indices. - &self.0[i as usize] - } -} - -impl IndexMut for Dense { - #[inline] - fn index_mut(&mut self, i: u8) -> &mut S { - // SAFETY: This is safe because all dense transitions have - // exactly 256 elements, so all u8 values are valid indices. - &mut self.0[i as usize] - } -} - -/// A representation of transitions in an NFA. 
-/// -/// Transitions have either a sparse representation, which is slower for -/// lookups but uses less memory, or a dense representation, which is faster -/// for lookups but uses more memory. In the sparse representation, the absence -/// of a state implies a transition to `fail_id()`. Transitions to `dead_id()` -/// are still explicitly represented. -/// -/// For the NFA, by default, we use a dense representation for transitions for -/// states close to the start state because it's likely these are the states -/// that will be most frequently visited. -#[derive(Clone, Debug)] -enum Transitions { - Sparse(Vec<(u8, S)>), - Dense(Dense), -} - -impl Transitions { - fn heap_bytes(&self) -> usize { - match *self { - Transitions::Sparse(ref sparse) => { - sparse.len() * size_of::<(u8, S)>() - } - Transitions::Dense(ref dense) => dense.len() * size_of::(), - } - } - - fn next_state(&self, input: u8) -> S { - match *self { - Transitions::Sparse(ref sparse) => { - for &(b, id) in sparse { - if b == input { - return id; - } - } - fail_id() - } - Transitions::Dense(ref dense) => dense[input], - } - } - - fn set_next_state(&mut self, input: u8, next: S) { - match *self { - Transitions::Sparse(ref mut sparse) => { - match sparse.binary_search_by_key(&input, |&(b, _)| b) { - Ok(i) => sparse[i] = (input, next), - Err(i) => sparse.insert(i, (input, next)), - } - } - Transitions::Dense(ref mut dense) => { - dense[input] = next; - } - } - } - - /// Iterate over transitions in this state while skipping over transitions - /// to `fail_id()`. - fn iter(&self, mut f: F) { - match *self { - Transitions::Sparse(ref sparse) => { - for &(b, id) in sparse { - f(b, id); - } - } - Transitions::Dense(ref dense) => { - for b in AllBytesIter::new() { - let id = dense[b]; - if id != fail_id() { - f(b, id); - } - } - } - } - } - - /// Iterate over all transitions in this state according to the given - /// equivalence classes, including transitions to `fail_id()`. 
- fn iter_all(&self, classes: &ByteClasses, mut f: F) { - if classes.is_singleton() { - match *self { - Transitions::Sparse(ref sparse) => { - sparse_iter(sparse, f); - } - Transitions::Dense(ref dense) => { - for b in AllBytesIter::new() { - f(b, dense[b]); - } - } - } - } else { - // In this case, we only want to yield a single byte for each - // equivalence class. - match *self { - Transitions::Sparse(ref sparse) => { - let mut last_class = None; - sparse_iter(sparse, |b, next| { - let class = classes.get(b); - if last_class != Some(class) { - last_class = Some(class); - f(b, next); - } - }) - } - Transitions::Dense(ref dense) => { - for b in classes.representatives() { - f(b, dense[b]); - } - } - } - } - } -} - -/// Iterator over transitions in a state, skipping transitions to `fail_id()`. -/// -/// This abstracts over the representation of NFA transitions, which may be -/// either in a sparse or dense representation. -/// -/// This somewhat idiosyncratically borrows the NFA mutably, so that when one -/// is iterating over transitions, the caller can still mutate the NFA. This -/// is useful when creating failure transitions. -#[derive(Debug)] -struct IterTransitionsMut<'a, S: StateID> { - nfa: &'a mut NFA, - state_id: S, - cur: usize, -} - -impl<'a, S: StateID> IterTransitionsMut<'a, S> { - fn new(nfa: &'a mut NFA, state_id: S) -> IterTransitionsMut<'a, S> { - IterTransitionsMut { nfa, state_id, cur: 0 } - } - - fn nfa(&mut self) -> &mut NFA { - self.nfa - } -} - -impl<'a, S: StateID> Iterator for IterTransitionsMut<'a, S> { - type Item = (u8, S); - - fn next(&mut self) -> Option<(u8, S)> { - match self.nfa.states[self.state_id.to_usize()].trans { - Transitions::Sparse(ref sparse) => { - if self.cur >= sparse.len() { - return None; - } - let i = self.cur; - self.cur += 1; - Some(sparse[i]) - } - Transitions::Dense(ref dense) => { - while self.cur < dense.len() { - // There are always exactly 255 transitions in dense repr. 
- debug_assert!(self.cur < 256); - - let b = self.cur as u8; - let id = dense[b]; - self.cur += 1; - if id != fail_id() { - return Some((b, id)); - } - } - None - } - } - } -} - -/// A simple builder for configuring the NFA construction of Aho-Corasick. -#[derive(Clone, Debug)] -pub struct Builder { - dense_depth: usize, - match_kind: MatchKind, - prefilter: bool, - anchored: bool, - ascii_case_insensitive: bool, -} - -impl Default for Builder { - fn default() -> Builder { - Builder { - dense_depth: 2, - match_kind: MatchKind::default(), - prefilter: true, - anchored: false, - ascii_case_insensitive: false, - } - } -} - -impl Builder { - pub fn new() -> Builder { - Builder::default() - } - - pub fn build(&self, patterns: I) -> Result> - where - I: IntoIterator, - P: AsRef<[u8]>, - { - Compiler::new(self)?.compile(patterns) - } - - pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { - self.match_kind = kind; - self - } - - pub fn dense_depth(&mut self, depth: usize) -> &mut Builder { - self.dense_depth = depth; - self - } - - pub fn prefilter(&mut self, yes: bool) -> &mut Builder { - self.prefilter = yes; - self - } - - pub fn anchored(&mut self, yes: bool) -> &mut Builder { - self.anchored = yes; - self - } - - pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { - self.ascii_case_insensitive = yes; - self - } -} - -/// A compiler uses a builder configuration and builds up the NFA formulation -/// of an Aho-Corasick automaton. This roughly corresponds to the standard -/// formulation described in textbooks. 
-#[derive(Debug)] -struct Compiler<'a, S: StateID> { - builder: &'a Builder, - prefilter: prefilter::Builder, - nfa: NFA, - byte_classes: ByteClassBuilder, -} - -impl<'a, S: StateID> Compiler<'a, S> { - fn new(builder: &'a Builder) -> Result> { - Ok(Compiler { - builder, - prefilter: prefilter::Builder::new(builder.match_kind) - .ascii_case_insensitive(builder.ascii_case_insensitive), - nfa: NFA { - match_kind: builder.match_kind, - start_id: usize_to_state_id(2)?, - max_pattern_len: 0, - pattern_count: 0, - heap_bytes: 0, - prefilter: None, - anchored: builder.anchored, - byte_classes: ByteClasses::singletons(), - states: vec![], - }, - byte_classes: ByteClassBuilder::new(), - }) - } - - fn compile(mut self, patterns: I) -> Result> - where - I: IntoIterator, - P: AsRef<[u8]>, - { - self.add_state(0)?; // the fail state, which is never entered - self.add_state(0)?; // the dead state, only used for leftmost - self.add_state(0)?; // the start state - self.build_trie(patterns)?; - self.add_start_state_loop(); - self.add_dead_state_loop(); - if !self.builder.anchored { - self.fill_failure_transitions(); - } - self.close_start_state_loop(); - self.nfa.byte_classes = self.byte_classes.build(); - if !self.builder.anchored { - self.nfa.prefilter = self.prefilter.build(); - } - self.calculate_size(); - Ok(self.nfa) - } - - /// This sets up the initial prefix trie that makes up the Aho-Corasick - /// automaton. Effectively, it creates the basic structure of the - /// automaton, where every pattern given has a path from the start state to - /// the end of the pattern. 
- fn build_trie(&mut self, patterns: I) -> Result<()> - where - I: IntoIterator, - P: AsRef<[u8]>, - { - 'PATTERNS: for (pati, pat) in patterns.into_iter().enumerate() { - let pat = pat.as_ref(); - self.nfa.max_pattern_len = - cmp::max(self.nfa.max_pattern_len, pat.len()); - self.nfa.pattern_count += 1; - - let mut prev = self.nfa.start_id; - let mut saw_match = false; - for (depth, &b) in pat.iter().enumerate() { - // When leftmost-first match semantics are requested, we - // specifically stop adding patterns when a previously added - // pattern is a prefix of it. We avoid adding it because - // leftmost-first semantics imply that the pattern can never - // match. This is not just an optimization to save space! It - // is necessary for correctness. In fact, this is the only - // difference in the automaton between the implementations for - // leftmost-first and leftmost-longest. - saw_match = saw_match || self.nfa.state(prev).is_match(); - if self.builder.match_kind.is_leftmost_first() && saw_match { - // Skip to the next pattern immediately. This avoids - // incorrectly adding a match after this loop terminates. - continue 'PATTERNS; - } - - // Add this byte to our equivalence classes. We don't use these - // for NFA construction. These are instead used only if we're - // building a DFA. They would technically be useful for the - // NFA, but it would require a second pass over the patterns. - self.byte_classes.set_range(b, b); - if self.builder.ascii_case_insensitive { - let b = opposite_ascii_case(b); - self.byte_classes.set_range(b, b); - } - - // If the transition from prev using the current byte already - // exists, then just move through it. Otherwise, add a new - // state. We track the depth here so that we can determine - // how to represent transitions. States near the start state - // use a dense representation that uses more memory but is - // faster. Other states use a sparse representation that uses - // less memory but is slower. 
- let next = self.nfa.state(prev).next_state(b); - if next != fail_id() { - prev = next; - } else { - let next = self.add_state(depth + 1)?; - self.nfa.state_mut(prev).set_next_state(b, next); - if self.builder.ascii_case_insensitive { - let b = opposite_ascii_case(b); - self.nfa.state_mut(prev).set_next_state(b, next); - } - prev = next; - } - } - // Once the pattern has been added, log the match in the final - // state that it reached. - self.nfa.state_mut(prev).add_match(pati, pat.len()); - // ... and hand it to the prefilter builder, if applicable. - if self.builder.prefilter { - self.prefilter.add(pat); - } - } - Ok(()) - } - - /// This routine creates failure transitions according to the standard - /// textbook formulation of the Aho-Corasick algorithm, with a couple small - /// tweaks to support "leftmost" semantics. - /// - /// Building failure transitions is the most interesting part of building - /// the Aho-Corasick automaton, because they are what allow searches to - /// be performed in linear time. Specifically, a failure transition is - /// a single transition associated with each state that points back to - /// the longest proper suffix of the pattern being searched. The failure - /// transition is followed whenever there exists no transition on the - /// current state for the current input byte. If there is no other proper - /// suffix, then the failure transition points back to the starting state. - /// - /// For example, let's say we built an Aho-Corasick automaton with the - /// following patterns: 'abcd' and 'cef'. The trie looks like this: - /// - /// ```ignore - /// a - S1 - b - S2 - c - S3 - d - S4* - /// / - /// S0 - c - S5 - e - S6 - f - S7* - /// ``` - /// - /// At this point, it should be fairly straight-forward to see how this - /// trie can be used in a simplistic way. 
At any given position in the - /// text we're searching (called the "subject" string), all we need to do - /// is follow the transitions in the trie by consuming one transition for - /// each byte in the subject string. If we reach a match state, then we can - /// report that location as a match. - /// - /// The trick comes when searching a subject string like 'abcef'. We'll - /// initially follow the transition from S0 to S1 and wind up in S3 after - /// observng the 'c' byte. At this point, the next byte is 'e' but state - /// S3 has no transition for 'e', so the search fails. We then would need - /// to restart the search at the next position in 'abcef', which - /// corresponds to 'b'. The match would fail, but the next search starting - /// at 'c' would finally succeed. The problem with this approach is that - /// we wind up searching the subject string potentially many times. In - /// effect, this makes the algorithm have worst case `O(n * m)` complexity, - /// where `n ~ len(subject)` and `m ~ len(all patterns)`. We would instead - /// like to achieve a `O(n + m)` worst case complexity. - /// - /// This is where failure transitions come in. Instead of dying at S3 in - /// the first search, the automaton can instruct the search to move to - /// another part of the automaton that corresponds to a suffix of what - /// we've seen so far. Recall that we've seen 'abc' in the subject string, - /// and the automaton does indeed have a non-empty suffix, 'c', that could - /// potentially lead to another match. 
Thus, the actual Aho-Corasick - /// automaton for our patterns in this case looks like this: - /// - /// ```ignore - /// a - S1 - b - S2 - c - S3 - d - S4* - /// / / - /// / ---------------- - /// / / - /// S0 - c - S5 - e - S6 - f - S7* - /// ``` - /// - /// That is, we have a failure transition from S3 to S5, which is followed - /// exactly in cases when we are in state S3 but see any byte other than - /// 'd' (that is, we've "failed" to find a match in this portion of our - /// trie). We know we can transition back to S5 because we've already seen - /// a 'c' byte, so we don't need to re-scan it. We can then pick back up - /// with the search starting at S5 and complete our match. - /// - /// Adding failure transitions to a trie is fairly simple, but subtle. The - /// key issue is that you might have multiple failure transition that you - /// need to follow. For example, look at the trie for the patterns - /// 'abcd', 'b', 'bcd' and 'cd': - /// - /// ```ignore - /// - a - S1 - b - S2* - c - S3 - d - S4* - /// / / / - /// / ------- ------- - /// / / / - /// S0 --- b - S5* - c - S6 - d - S7* - /// \ / - /// \ -------- - /// \ / - /// - c - S8 - d - S9* - /// ``` - /// - /// The failure transitions for this trie are defined from S2 to S5, - /// S3 to S6 and S6 to S8. Moreover, state S2 needs to track that it - /// corresponds to a match, since its failure transition to S5 is itself - /// a match state. - /// - /// Perhaps simplest way to think about adding these failure transitions - /// is recursively. That is, if you know the failure transitions for every - /// possible previous state that could be visited (e.g., when computing the - /// failure transition for S3, you already know the failure transitions - /// for S0, S1 and S2), then you can simply follow the failure transition - /// of the previous state and check whether the incoming transition is - /// defined after following the failure transition. 
- /// - /// For example, when determining the failure state for S3, by our - /// assumptions, we already know that there is a failure transition from - /// S2 (the previous state) to S5. So we follow that transition and check - /// whether the transition connecting S2 to S3 is defined. Indeed, it is, - /// as there is a transition from S5 to S6 for the byte 'c'. If no such - /// transition existed, we could keep following the failure transitions - /// until we reach the start state, which is the failure transition for - /// every state that has no corresponding proper suffix. - /// - /// We don't actually use recursion to implement this, but instead, use a - /// breadth first search of the automaton. Our base case is the start - /// state, whose failure transition is just a transition to itself. - /// - /// When building a leftmost automaton, we proceed as above, but only - /// include a subset of failure transitions. Namely, we omit any failure - /// transitions that appear after a match state in the trie. This is - /// because failure transitions always point back to a proper suffix of - /// what has been seen so far. Thus, following a failure transition after - /// a match implies looking for a match that starts after the one that has - /// already been seen, which is of course therefore not the leftmost match. - /// - /// N.B. I came up with this algorithm on my own, and after scouring all of - /// the other AC implementations I know of (Perl, Snort, many on GitHub). - /// I couldn't find any that implement leftmost semantics like this. - /// Perl of course needs leftmost-first semantics, but they implement it - /// with a seeming hack at *search* time instead of encoding it into the - /// automaton. There are also a couple Java libraries that support leftmost - /// longest semantics, but they do it by building a queue of matches at - /// search time, which is even worse than what Perl is doing. 
---AG - fn fill_failure_transitions(&mut self) { - let kind = self.match_kind(); - // Initialize the queue for breadth first search with all transitions - // out of the start state. We handle the start state specially because - // we only want to follow non-self transitions. If we followed self - // transitions, then this would never terminate. - let mut queue = VecDeque::new(); - let mut seen = self.queued_set(); - let mut it = self.nfa.iter_transitions_mut(self.nfa.start_id); - while let Some((_, next)) = it.next() { - // Skip anything we've seen before and any self-transitions on the - // start state. - if next == it.nfa().start_id || seen.contains(next) { - continue; - } - queue.push_back(next); - seen.insert(next); - // Under leftmost semantics, if a state immediately following - // the start state is a match state, then we never want to - // follow its failure transition since the failure transition - // necessarily leads back to the start state, which we never - // want to do for leftmost matching after a match has been - // found. - // - // We apply the same logic to non-start states below as well. - if kind.is_leftmost() && it.nfa().state(next).is_match() { - it.nfa().state_mut(next).fail = dead_id(); - } - } - while let Some(id) = queue.pop_front() { - let mut it = self.nfa.iter_transitions_mut(id); - while let Some((b, next)) = it.next() { - if seen.contains(next) { - // The only way to visit a duplicate state in a transition - // list is when ASCII case insensitivity is enabled. In - // this case, we want to skip it since it's redundant work. - // But it would also end up duplicating matches, which - // results in reporting duplicate matches in some cases. - // See the 'acasei010' regression test. 
- continue; - } - queue.push_back(next); - seen.insert(next); - - // As above for start states, under leftmost semantics, once - // we see a match all subsequent states should have no failure - // transitions because failure transitions always imply looking - // for a match that is a suffix of what has been seen so far - // (where "seen so far" corresponds to the string formed by - // following the transitions from the start state to the - // current state). Under leftmost semantics, we specifically do - // not want to allow this to happen because we always want to - // report the match found at the leftmost position. - // - // The difference between leftmost-first and leftmost-longest - // occurs previously while we build the trie. For - // leftmost-first, we simply omit any entries that would - // otherwise require passing through a match state. - // - // Note that for correctness, the failure transition has to be - // set to the dead state for ALL states following a match, not - // just the match state itself. However, by setting the failure - // transition to the dead state on all match states, the dead - // state will automatically propagate to all subsequent states - // via the failure state computation below. - if kind.is_leftmost() && it.nfa().state(next).is_match() { - it.nfa().state_mut(next).fail = dead_id(); - continue; - } - let mut fail = it.nfa().state(id).fail; - while it.nfa().state(fail).next_state(b) == fail_id() { - fail = it.nfa().state(fail).fail; - } - fail = it.nfa().state(fail).next_state(b); - it.nfa().state_mut(next).fail = fail; - it.nfa().copy_matches(fail, next); - } - // If the start state is a match state, then this automaton can - // match the empty string. This implies all states are match states - // since every position matches the empty string, so copy the - // matches from the start state to every state. 
Strictly speaking, - // this is only necessary for overlapping matches since each - // non-empty non-start match state needs to report empty matches - // in addition to its own. For the non-overlapping case, such - // states only report the first match, which is never empty since - // it isn't a start state. - if !kind.is_leftmost() { - it.nfa().copy_empty_matches(id); - } - } - } - - /// Returns a set that tracked queued states. - /// - /// This is only necessary when ASCII case insensitivity is enabled, since - /// it is the only way to visit the same state twice. Otherwise, this - /// returns an inert set that nevers adds anything and always reports - /// `false` for every member test. - fn queued_set(&self) -> QueuedSet { - if self.builder.ascii_case_insensitive { - QueuedSet::active() - } else { - QueuedSet::inert() - } - } - - /// Set the failure transitions on the start state to loop back to the - /// start state. This effectively permits the Aho-Corasick automaton to - /// match at any position. This is also required for finding the next - /// state to terminate, namely, finding the next state should never return - /// a fail_id. - /// - /// This must be done after building the initial trie, since trie - /// construction depends on transitions to `fail_id` to determine whether a - /// state already exists or not. - fn add_start_state_loop(&mut self) { - let start_id = self.nfa.start_id; - let start = self.nfa.start_mut(); - for b in AllBytesIter::new() { - if start.next_state(b) == fail_id() { - start.set_next_state(b, start_id); - } - } - } - - /// Remove the start state loop by rewriting any transitions on the start - /// state back to the start state with transitions to the dead state. - /// - /// The loop is only closed when two conditions are met: the start state - /// is a match state and the match kind is leftmost-first or - /// leftmost-longest. 
(Alternatively, if this is an anchored automaton, - /// then the start state is always closed, regardless of aforementioned - /// conditions.) - /// - /// The reason for this is that under leftmost semantics, a start state - /// that is also a match implies that we should never restart the search - /// process. We allow normal transitions out of the start state, but if - /// none exist, we transition to the dead state, which signals that - /// searching should stop. - fn close_start_state_loop(&mut self) { - if self.builder.anchored - || (self.match_kind().is_leftmost() && self.nfa.start().is_match()) - { - let start_id = self.nfa.start_id; - let start = self.nfa.start_mut(); - for b in AllBytesIter::new() { - if start.next_state(b) == start_id { - start.set_next_state(b, dead_id()); - } - } - } - } - - /// Sets all transitions on the dead state to point back to the dead state. - /// Normally, missing transitions map back to the failure state, but the - /// point of the dead state is to act as a sink that can never be escaped. - fn add_dead_state_loop(&mut self) { - let dead = self.nfa.state_mut(dead_id()); - for b in AllBytesIter::new() { - dead.set_next_state(b, dead_id()); - } - } - - /// Computes the total amount of heap used by this NFA in bytes. - fn calculate_size(&mut self) { - let mut size = 0; - for state in &self.nfa.states { - size += size_of::>() + state.heap_bytes(); - } - self.nfa.heap_bytes = size; - } - - /// Add a new state to the underlying NFA with the given depth. The depth - /// is used to determine how to represent the transitions. - /// - /// If adding the new state would overflow the chosen state ID - /// representation, then this returns an error. - fn add_state(&mut self, depth: usize) -> Result { - if depth < self.builder.dense_depth { - self.nfa.add_dense_state(depth) - } else { - self.nfa.add_sparse_state(depth) - } - } - - /// Returns the match kind configured on the underlying builder. 
- fn match_kind(&self) -> MatchKind { - self.builder.match_kind - } -} - -/// A set of state identifiers used to avoid revisiting the same state multiple -/// times when filling in failure transitions. -/// -/// This set has an "inert" and an "active" mode. When inert, the set never -/// stores anything and always returns `false` for every member test. This is -/// useful to avoid the performance and memory overhead of maintaining this -/// set when it is not needed. -#[derive(Debug)] -struct QueuedSet { - set: Option>, -} - -impl QueuedSet { - /// Return an inert set that returns `false` for every state ID membership - /// test. - fn inert() -> QueuedSet { - QueuedSet { set: None } - } - - /// Return an active set that tracks state ID membership. - fn active() -> QueuedSet { - QueuedSet { set: Some(BTreeSet::new()) } - } - - /// Inserts the given state ID into this set. (If the set is inert, then - /// this is a no-op.) - fn insert(&mut self, state_id: S) { - if let Some(ref mut set) = self.set { - set.insert(state_id); - } - } - - /// Returns true if and only if the given state ID is in this set. If the - /// set is inert, this always returns false. - fn contains(&self, state_id: S) -> bool { - match self.set { - None => false, - Some(ref set) => set.contains(&state_id), - } - } -} - -/// An iterator over every byte value. -/// -/// We use this instead of (0..256).map(|b| b as u8) because this optimizes -/// better in debug builds. -/// -/// We also use this instead of 0..=255 because we're targeting Rust 1.24 and -/// inclusive range syntax was stabilized in Rust 1.26. We can get rid of this -/// once our MSRV is Rust 1.26 or newer. 
-#[derive(Debug)] -struct AllBytesIter(u16); - -impl AllBytesIter { - fn new() -> AllBytesIter { - AllBytesIter(0) - } -} - -impl Iterator for AllBytesIter { - type Item = u8; - - fn next(&mut self) -> Option { - if self.0 >= 256 { - None - } else { - let b = self.0 as u8; - self.0 += 1; - Some(b) - } - } -} - -impl fmt::Debug for NFA { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "NFA(")?; - writeln!(f, "match_kind: {:?}", self.match_kind)?; - writeln!(f, "prefilter: {:?}", self.prefilter)?; - writeln!(f, "{}", "-".repeat(79))?; - for (id, s) in self.states.iter().enumerate() { - let mut trans = vec![]; - s.trans.iter(|byte, next| { - // The start state has a bunch of uninteresting transitions - // back into itself. It's questionable to hide them since they - // are critical to understanding the automaton, but they are - // very noisy without better formatting for contiugous ranges - // to the same state. - if id == self.start_id.to_usize() && next == self.start_id { - return; - } - // Similarly, the dead state has a bunch of uninteresting - // transitions too. - if id == dead_id() { - return; - } - trans.push(format!("{} => {}", escape(byte), next.to_usize())); - }); - writeln!(f, "{:04}: {}", id, trans.join(", "))?; - - let matches: Vec = s - .matches - .iter() - .map(|&(pattern_id, _)| pattern_id.to_string()) - .collect(); - writeln!(f, " matches: {}", matches.join(", "))?; - writeln!(f, " fail: {}", s.fail.to_usize())?; - writeln!(f, " depth: {}", s.depth)?; - } - writeln!(f, "{}", "-".repeat(79))?; - writeln!(f, ")")?; - Ok(()) - } -} - -/// Iterate over all possible byte transitions given a sparse set. 
-fn sparse_iter(trans: &[(u8, S)], mut f: F) { - let mut byte = 0u16; - for &(b, id) in trans { - while byte < (b as u16) { - f(byte as u8, fail_id()); - byte += 1; - } - f(b, id); - byte += 1; - } - for b in byte..256 { - f(b as u8, fail_id()); - } -} - -/// Safely return two mutable borrows to two different locations in the given -/// slice. -/// -/// This panics if i == j. -fn get_two_mut(xs: &mut [T], i: usize, j: usize) -> (&mut T, &mut T) { - assert!(i != j, "{} must not be equal to {}", i, j); - if i < j { - let (before, after) = xs.split_at_mut(j); - (&mut before[i], &mut after[0]) - } else { - let (before, after) = xs.split_at_mut(i); - (&mut after[0], &mut before[j]) - } -} - -/// Return the given byte as its escaped string form. -fn escape(b: u8) -> String { - use std::ascii; - - String::from_utf8(ascii::escape_default(b).collect::>()).unwrap() -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn scratch() { - let nfa: NFA = Builder::new() - .dense_depth(0) - // .match_kind(MatchKind::LeftmostShortest) - // .match_kind(MatchKind::LeftmostLongest) - .match_kind(MatchKind::LeftmostFirst) - // .build(&["abcd", "ce", "b"]) - // .build(&["ab", "bc"]) - // .build(&["b", "bcd", "ce"]) - // .build(&["abc", "bx"]) - // .build(&["abc", "bd", "ab"]) - // .build(&["abcdefghi", "hz", "abcdefgh"]) - // .build(&["abcd", "bce", "b"]) - .build(&["abcdefg", "bcde", "bcdef"]) - .unwrap(); - println!("{:?}", nfa); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/api.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/api.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/api.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/api.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,625 +0,0 @@ -use std::u16; - -use crate::packed::pattern::Patterns; -use crate::packed::rabinkarp::RabinKarp; -use crate::packed::teddy::{self, 
Teddy}; -use crate::Match; - -/// This is a limit placed on the total number of patterns we're willing to try -/// and match at once. As more sophisticated algorithms are added, this number -/// may be increased. -const PATTERN_LIMIT: usize = 128; - -/// A knob for controlling the match semantics of a packed multiple string -/// searcher. -/// -/// This differs from the -/// [`MatchKind`](../enum.MatchKind.html) -/// type in the top-level crate module in that it doesn't support -/// "standard" match semantics, and instead only supports leftmost-first or -/// leftmost-longest. Namely, "standard" semantics cannot be easily supported -/// by packed searchers. -/// -/// For more information on the distinction between leftmost-first and -/// leftmost-longest, see the docs on the top-level `MatchKind` type. -/// -/// Unlike the top-level `MatchKind` type, the default match semantics for this -/// type are leftmost-first. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum MatchKind { - /// Use leftmost-first match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the match - /// corresponding to the pattern that appeared earlier when constructing - /// the automaton is reported. - /// - /// This is the default. - LeftmostFirst, - /// Use leftmost-longest match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the longest match - /// is chosen. - LeftmostLongest, - /// Hints that destructuring should not be exhaustive. - /// - /// This enum may grow additional variants, so this makes sure clients - /// don't count on exhaustive matching. (Otherwise, adding a new variant - /// could break existing code.) - #[doc(hidden)] - __Nonexhaustive, -} - -impl Default for MatchKind { - fn default() -> MatchKind { - MatchKind::LeftmostFirst - } -} - -/// The configuration for a packed multiple pattern searcher. 
-/// -/// The configuration is currently limited only to being able to select the -/// match semantics (leftmost-first or leftmost-longest) of a searcher. In the -/// future, more knobs may be made available. -/// -/// A configuration produces a [`packed::Builder`](struct.Builder.html), which -/// in turn can be used to construct a -/// [`packed::Searcher`](struct.Searcher.html) for searching. -/// -/// # Example -/// -/// This example shows how to use leftmost-longest semantics instead of the -/// default (leftmost-first). -/// -/// ``` -/// use aho_corasick::packed::{Config, MatchKind}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Config::new() -/// .match_kind(MatchKind::LeftmostLongest) -/// .builder() -/// .add("foo") -/// .add("foobar") -/// .build()?; -/// let matches: Vec = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![1], matches); -/// # Some(()) } -/// # if cfg!(target_arch = "x86_64") { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Config { - kind: MatchKind, - force: Option, - force_teddy_fat: Option, - force_avx: Option, -} - -/// An internal option for forcing the use of a particular packed algorithm. -/// -/// When an algorithm is forced, if a searcher could not be constructed for it, -/// then no searcher will be returned even if an alternative algorithm would -/// work. -#[derive(Clone, Debug)] -enum ForceAlgorithm { - Teddy, - RabinKarp, -} - -impl Default for Config { - fn default() -> Config { - Config::new() - } -} - -impl Config { - /// Create a new default configuration. A default configuration uses - /// leftmost-first match semantics. - pub fn new() -> Config { - Config { - kind: MatchKind::LeftmostFirst, - force: None, - force_teddy_fat: None, - force_avx: None, - } - } - - /// Create a packed builder from this configuration. 
The builder can be - /// used to accumulate patterns and create a - /// [`Searcher`](struct.Searcher.html) - /// from them. - pub fn builder(&self) -> Builder { - Builder::from_config(self.clone()) - } - - /// Set the match semantics for this configuration. - pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config { - self.kind = kind; - self - } - - /// An undocumented method for forcing the use of the Teddy algorithm. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn force_teddy(&mut self, yes: bool) -> &mut Config { - if yes { - self.force = Some(ForceAlgorithm::Teddy); - } else { - self.force = None; - } - self - } - - /// An undocumented method for forcing the use of the Fat Teddy algorithm. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn force_teddy_fat(&mut self, yes: Option) -> &mut Config { - self.force_teddy_fat = yes; - self - } - - /// An undocumented method for forcing the use of SSE (`Some(false)`) or - /// AVX (`Some(true)`) algorithms. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn force_avx(&mut self, yes: Option) -> &mut Config { - self.force_avx = yes; - self - } - - /// An undocumented method for forcing the use of the Rabin-Karp algorithm. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. 
- #[doc(hidden)] - pub fn force_rabin_karp(&mut self, yes: bool) -> &mut Config { - if yes { - self.force = Some(ForceAlgorithm::RabinKarp); - } else { - self.force = None; - } - self - } -} - -/// A builder for constructing a packed searcher from a collection of patterns. -/// -/// # Example -/// -/// This example shows how to use a builder to construct a searcher. By -/// default, leftmost-first match semantics are used. -/// -/// ``` -/// use aho_corasick::packed::{Builder, MatchKind}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Builder::new() -/// .add("foobar") -/// .add("foo") -/// .build()?; -/// let matches: Vec = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![0], matches); -/// # Some(()) } -/// # if cfg!(target_arch = "x86_64") { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - /// The configuration of this builder and subsequent matcher. - config: Config, - /// Set to true if the builder detects that a matcher cannot be built. - inert: bool, - /// The patterns provided by the caller. - patterns: Patterns, -} - -impl Builder { - /// Create a new builder for constructing a multi-pattern searcher. This - /// constructor uses the default configuration. - pub fn new() -> Builder { - Builder::from_config(Config::new()) - } - - fn from_config(config: Config) -> Builder { - Builder { config, inert: false, patterns: Patterns::new() } - } - - /// Build a searcher from the patterns added to this builder so far. - pub fn build(&self) -> Option { - if self.inert || self.patterns.is_empty() { - return None; - } - let mut patterns = self.patterns.clone(); - patterns.set_match_kind(self.config.kind); - let rabinkarp = RabinKarp::new(&patterns); - // Effectively, we only want to return a searcher if we can use Teddy, - // since Teddy is our only fast packed searcher at the moment. 
- // Rabin-Karp is only used when searching haystacks smaller than what - // Teddy can support. Thus, the only way to get a Rabin-Karp searcher - // is to force it using undocumented APIs (for tests/benchmarks). - let (search_kind, minimum_len) = match self.config.force { - None | Some(ForceAlgorithm::Teddy) => { - let teddy = match self.build_teddy(&patterns) { - None => return None, - Some(teddy) => teddy, - }; - let minimum_len = teddy.minimum_len(); - (SearchKind::Teddy(teddy), minimum_len) - } - Some(ForceAlgorithm::RabinKarp) => (SearchKind::RabinKarp, 0), - }; - Some(Searcher { patterns, rabinkarp, search_kind, minimum_len }) - } - - fn build_teddy(&self, patterns: &Patterns) -> Option { - teddy::Builder::new() - .avx(self.config.force_avx) - .fat(self.config.force_teddy_fat) - .build(&patterns) - } - - /// Add the given pattern to this set to match. - /// - /// The order in which patterns are added is significant. Namely, when - /// using leftmost-first match semantics, then when multiple patterns can - /// match at a particular location, the pattern that was added first is - /// used as the match. - /// - /// If the number of patterns added exceeds the amount supported by packed - /// searchers, then the builder will stop accumulating patterns and render - /// itself inert. At this point, constructing a searcher will always return - /// `None`. - pub fn add>(&mut self, pattern: P) -> &mut Builder { - if self.inert { - return self; - } else if self.patterns.len() >= PATTERN_LIMIT { - self.inert = true; - self.patterns.reset(); - return self; - } - // Just in case PATTERN_LIMIT increases beyond u16::MAX. - assert!(self.patterns.len() <= u16::MAX as usize); - - let pattern = pattern.as_ref(); - if pattern.is_empty() { - self.inert = true; - self.patterns.reset(); - return self; - } - self.patterns.add(pattern); - self - } - - /// Add the given iterator of patterns to this set to match. 
- /// - /// The iterator must yield elements that can be converted into a `&[u8]`. - /// - /// The order in which patterns are added is significant. Namely, when - /// using leftmost-first match semantics, then when multiple patterns can - /// match at a particular location, the pattern that was added first is - /// used as the match. - /// - /// If the number of patterns added exceeds the amount supported by packed - /// searchers, then the builder will stop accumulating patterns and render - /// itself inert. At this point, constructing a searcher will always return - /// `None`. - pub fn extend(&mut self, patterns: I) -> &mut Builder - where - I: IntoIterator, - P: AsRef<[u8]>, - { - for p in patterns { - self.add(p); - } - self - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -/// A packed searcher for quickly finding occurrences of multiple patterns. -/// -/// If callers need more flexible construction, or if one wants to change the -/// match semantics (either leftmost-first or leftmost-longest), then one can -/// use the [`Config`](struct.Config.html) and/or -/// [`Builder`](struct.Builder.html) types for more fine grained control. -/// -/// # Example -/// -/// This example shows how to create a searcher from an iterator of patterns. -/// By default, leftmost-first match semantics are used. 
-/// -/// ``` -/// use aho_corasick::packed::{MatchKind, Searcher}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; -/// let matches: Vec = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![0], matches); -/// # Some(()) } -/// # if cfg!(target_arch = "x86_64") { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Searcher { - patterns: Patterns, - rabinkarp: RabinKarp, - search_kind: SearchKind, - minimum_len: usize, -} - -#[derive(Clone, Debug)] -enum SearchKind { - Teddy(Teddy), - RabinKarp, -} - -impl Searcher { - /// A convenience function for constructing a searcher from an iterator - /// of things that can be converted to a `&[u8]`. - /// - /// If a searcher could not be constructed (either because of an - /// unsupported CPU or because there are too many patterns), then `None` - /// is returned. - /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let matches: Vec = searcher - /// .find_iter("foobar") - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![0], matches); - /// # Some(()) } - /// # if cfg!(target_arch = "x86_64") { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn new(patterns: I) -> Option - where - I: IntoIterator, - P: AsRef<[u8]>, - { - Builder::new().extend(patterns).build() - } - - /// Return the first occurrence of any of the patterns in this searcher, - /// according to its match semantics, in the given haystack. 
The `Match` - /// returned will include the identifier of the pattern that matched, which - /// corresponds to the index of the pattern (starting from `0`) in which it - /// was added. - /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let mat = searcher.find("foobar")?; - /// assert_eq!(0, mat.pattern()); - /// assert_eq!(0, mat.start()); - /// assert_eq!(6, mat.end()); - /// # Some(()) } - /// # if cfg!(target_arch = "x86_64") { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn find>(&self, haystack: B) -> Option { - self.find_at(haystack, 0) - } - - /// Return the first occurrence of any of the patterns in this searcher, - /// according to its match semantics, in the given haystack starting from - /// the given position. - /// - /// The `Match` returned will include the identifier of the pattern that - /// matched, which corresponds to the index of the pattern (starting from - /// `0`) in which it was added. The offsets in the `Match` will be relative - /// to the start of `haystack` (and not `at`). 
- /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let mat = searcher.find_at("foofoobar", 3)?; - /// assert_eq!(0, mat.pattern()); - /// assert_eq!(3, mat.start()); - /// assert_eq!(9, mat.end()); - /// # Some(()) } - /// # if cfg!(target_arch = "x86_64") { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn find_at>( - &self, - haystack: B, - at: usize, - ) -> Option { - let haystack = haystack.as_ref(); - match self.search_kind { - SearchKind::Teddy(ref teddy) => { - if haystack[at..].len() < teddy.minimum_len() { - return self.slow_at(haystack, at); - } - teddy.find_at(&self.patterns, haystack, at) - } - SearchKind::RabinKarp => { - self.rabinkarp.find_at(&self.patterns, haystack, at) - } - } - } - - /// Return an iterator of non-overlapping occurrences of the patterns in - /// this searcher, according to its match semantics, in the given haystack. - /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let matches: Vec = searcher - /// .find_iter("foobar fooba foofoo") - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![0, 1, 1, 1], matches); - /// # Some(()) } - /// # if cfg!(target_arch = "x86_64") { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn find_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>( - &'a self, - haystack: &'b B, - ) -> FindIter<'a, 'b> { - FindIter { searcher: self, haystack: haystack.as_ref(), at: 0 } - } - - /// Returns the match kind used by this packed searcher. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// // leftmost-first is the default. - /// assert_eq!(&MatchKind::LeftmostFirst, searcher.match_kind()); - /// # Some(()) } - /// # if cfg!(target_arch = "x86_64") { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn match_kind(&self) -> &MatchKind { - self.patterns.match_kind() - } - - /// Returns the minimum length of a haystack that is required in order for - /// packed searching to be effective. - /// - /// In some cases, the underlying packed searcher may not be able to search - /// very short haystacks. When that occurs, the implementation will defer - /// to a slower non-packed searcher (which is still generally faster than - /// Aho-Corasick for a small number of patterns). However, callers may - /// want to avoid ever using the slower variant, which one can do by - /// never passing a haystack shorter than the minimum length returned by - /// this method. - pub fn minimum_len(&self) -> usize { - self.minimum_len - } - - /// Returns the approximate total amount of heap used by this searcher, in - /// units of bytes. - pub fn heap_bytes(&self) -> usize { - self.patterns.heap_bytes() - + self.rabinkarp.heap_bytes() - + self.search_kind.heap_bytes() - } - - /// Use a slow (non-packed) searcher. - /// - /// This is useful when a packed searcher could be constructed, but could - /// not be used to search a specific haystack. For example, if Teddy was - /// built but the haystack is smaller than ~34 bytes, then Teddy might not - /// be able to run. 
- fn slow_at(&self, haystack: &[u8], at: usize) -> Option { - self.rabinkarp.find_at(&self.patterns, haystack, at) - } -} - -impl SearchKind { - fn heap_bytes(&self) -> usize { - match *self { - SearchKind::Teddy(ref ted) => ted.heap_bytes(), - SearchKind::RabinKarp => 0, - } - } -} - -/// An iterator over non-overlapping matches from a packed searcher. -/// -/// The lifetime `'s` refers to the lifetime of the underlying -/// [`Searcher`](struct.Searcher.html), while the lifetime `'h` refers to the -/// lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindIter<'s, 'h> { - searcher: &'s Searcher, - haystack: &'h [u8], - at: usize, -} - -impl<'s, 'h> Iterator for FindIter<'s, 'h> { - type Item = Match; - - fn next(&mut self) -> Option { - if self.at > self.haystack.len() { - return None; - } - match self.searcher.find_at(&self.haystack, self.at) { - None => None, - Some(c) => { - self.at = c.end; - Some(c) - } - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/mod.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -/*! -A lower level API for packed multiple substring search, principally for a small -number of patterns. - -This sub-module provides vectorized routines for quickly finding matches of a -small number of patterns. In general, users of this crate shouldn't need to -interface with this module directly, as the primary -[`AhoCorasick`](../struct.AhoCorasick.html) -searcher will use these routines automatically as a prefilter when applicable. -However, in some cases, callers may want to bypass the Aho-Corasick machinery -entirely and use this vectorized searcher directly. 
- -# Overview - -The primary types in this sub-module are: - -* [`Searcher`](struct.Searcher.html) executes the actual search algorithm to - report matches in a haystack. -* [`Builder`](struct.Builder.html) accumulates patterns incrementally and can - construct a `Searcher`. -* [`Config`](struct.Config.html) permits tuning the searcher, and itself will - produce a `Builder` (which can then be used to build a `Searcher`). - Currently, the only tuneable knob are the match semantics, but this may be - expanded in the future. - -# Examples - -This example shows how to create a searcher from an iterator of patterns. -By default, leftmost-first match semantics are used. (See the top-level -[`MatchKind`](../enum.MatchKind.html) type for more details about match -semantics, which apply similarly to packed substring search.) - -``` -use aho_corasick::packed::{MatchKind, Searcher}; - -# fn example() -> Option<()> { -let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; -let matches: Vec = searcher - .find_iter("foobar") - .map(|mat| mat.pattern()) - .collect(); -assert_eq!(vec![0], matches); -# Some(()) } -# if cfg!(target_arch = "x86_64") { -# example().unwrap() -# } else { -# assert!(example().is_none()); -# } -``` - -This example shows how to use [`Config`](struct.Config.html) to change the -match semantics to leftmost-longest: - -``` -use aho_corasick::packed::{Config, MatchKind}; - -# fn example() -> Option<()> { -let searcher = Config::new() - .match_kind(MatchKind::LeftmostLongest) - .builder() - .add("foo") - .add("foobar") - .build()?; -let matches: Vec = searcher - .find_iter("foobar") - .map(|mat| mat.pattern()) - .collect(); -assert_eq!(vec![1], matches); -# Some(()) } -# if cfg!(target_arch = "x86_64") { -# example().unwrap() -# } else { -# assert!(example().is_none()); -# } -``` - -# Packed substring searching - -Packed substring searching refers to the use of SIMD (Single Instruction, -Multiple Data) to accelerate the detection of matches in a 
haystack. Unlike -conventional algorithms, such as Aho-Corasick, SIMD algorithms for substring -search tend to do better with a small number of patterns, where as Aho-Corasick -generally maintains reasonably consistent performance regardless of the number -of patterns you give it. Because of this, the vectorized searcher in this -sub-module cannot be used as a general purpose searcher, since building the -searcher may fail. However, in exchange, when searching for a small number of -patterns, searching can be quite a bit faster than Aho-Corasick (sometimes by -an order of magnitude). - -The key take away here is that constructing a searcher from a list of patterns -is a fallible operation. While the precise conditions under which building a -searcher can fail is specifically an implementation detail, here are some -common reasons: - -* Too many patterns were given. Typically, the limit is on the order of 100 or - so, but this limit may fluctuate based on available CPU features. -* The available packed algorithms require CPU features that aren't available. - For example, currently, this crate only provides packed algorithms for - `x86_64`. Therefore, constructing a packed searcher on any other target - (e.g., ARM) will always fail. -* Zero patterns were given, or one of the patterns given was empty. Packed - searchers require at least one pattern and that all patterns are non-empty. -* Something else about the nature of the patterns (typically based on - heuristics) suggests that a packed searcher would perform very poorly, so - no searcher is built. 
-*/ - -pub use crate::packed::api::{Builder, Config, FindIter, MatchKind, Searcher}; - -mod api; -mod pattern; -mod rabinkarp; -mod teddy; -#[cfg(test)] -mod tests; -#[cfg(target_arch = "x86_64")] -mod vector; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/pattern.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/pattern.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/pattern.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/pattern.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,318 +0,0 @@ -use std::cmp; -use std::fmt; -use std::mem; -use std::u16; -use std::usize; - -use crate::packed::api::MatchKind; - -/// The type used for representing a pattern identifier. -/// -/// We don't use `usize` here because our packed searchers don't scale to -/// huge numbers of patterns, so we keep things a bit smaller. -pub type PatternID = u16; - -/// A non-empty collection of non-empty patterns to search for. -/// -/// This collection of patterns is what is passed around to both execute -/// searches and to construct the searchers themselves. Namely, this permits -/// searches to avoid copying all of the patterns, and allows us to keep only -/// one copy throughout all packed searchers. -/// -/// Note that this collection is not a set. The same pattern can appear more -/// than once. -#[derive(Clone, Debug)] -pub struct Patterns { - /// The match semantics supported by this collection of patterns. - /// - /// The match semantics determines the order of the iterator over patterns. - /// For leftmost-first, patterns are provided in the same order as were - /// provided by the caller. For leftmost-longest, patterns are provided in - /// descending order of length, with ties broken by the order in which they - /// were provided by the caller. - kind: MatchKind, - /// The collection of patterns, indexed by their identifier. 
- by_id: Vec>, - /// The order of patterns defined for iteration, given by pattern - /// identifiers. The order of `by_id` and `order` is always the same for - /// leftmost-first semantics, but may be different for leftmost-longest - /// semantics. - order: Vec, - /// The length of the smallest pattern, in bytes. - minimum_len: usize, - /// The largest pattern identifier. This should always be equivalent to - /// the number of patterns minus one in this collection. - max_pattern_id: PatternID, - /// The total number of pattern bytes across the entire collection. This - /// is used for reporting total heap usage in constant time. - total_pattern_bytes: usize, -} - -impl Patterns { - /// Create a new collection of patterns for the given match semantics. The - /// ID of each pattern is the index of the pattern at which it occurs in - /// the `by_id` slice. - /// - /// If any of the patterns in the slice given are empty, then this panics. - /// Similarly, if the number of patterns given is zero, then this also - /// panics. - pub fn new() -> Patterns { - Patterns { - kind: MatchKind::default(), - by_id: vec![], - order: vec![], - minimum_len: usize::MAX, - max_pattern_id: 0, - total_pattern_bytes: 0, - } - } - - /// Add a pattern to this collection. - /// - /// This panics if the pattern given is empty. - pub fn add(&mut self, bytes: &[u8]) { - assert!(!bytes.is_empty()); - assert!(self.by_id.len() <= u16::MAX as usize); - - let id = self.by_id.len() as u16; - self.max_pattern_id = id; - self.order.push(id); - self.by_id.push(bytes.to_vec()); - self.minimum_len = cmp::min(self.minimum_len, bytes.len()); - self.total_pattern_bytes += bytes.len(); - } - - /// Set the match kind semantics for this collection of patterns. - /// - /// If the kind is not set, then the default is leftmost-first. 
- pub fn set_match_kind(&mut self, kind: MatchKind) { - match kind { - MatchKind::LeftmostFirst => { - self.order.sort(); - } - MatchKind::LeftmostLongest => { - let (order, by_id) = (&mut self.order, &mut self.by_id); - order.sort_by(|&id1, &id2| { - by_id[id1 as usize] - .len() - .cmp(&by_id[id2 as usize].len()) - .reverse() - }); - } - MatchKind::__Nonexhaustive => unreachable!(), - } - } - - /// Return the number of patterns in this collection. - /// - /// This is guaranteed to be greater than zero. - pub fn len(&self) -> usize { - self.by_id.len() - } - - /// Returns true if and only if this collection of patterns is empty. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the approximate total amount of heap used by these patterns, in - /// units of bytes. - pub fn heap_bytes(&self) -> usize { - self.order.len() * mem::size_of::() - + self.by_id.len() * mem::size_of::>() - + self.total_pattern_bytes - } - - /// Clears all heap memory associated with this collection of patterns and - /// resets all state such that it is a valid empty collection. - pub fn reset(&mut self) { - self.kind = MatchKind::default(); - self.by_id.clear(); - self.order.clear(); - self.minimum_len = usize::MAX; - self.max_pattern_id = 0; - } - - /// Return the maximum pattern identifier in this collection. This can be - /// useful in searchers for ensuring that the collection of patterns they - /// are provided at search time and at build time have the same size. - pub fn max_pattern_id(&self) -> PatternID { - assert_eq!((self.max_pattern_id + 1) as usize, self.len()); - self.max_pattern_id - } - - /// Returns the length, in bytes, of the smallest pattern. - /// - /// This is guaranteed to be at least one. - pub fn minimum_len(&self) -> usize { - self.minimum_len - } - - /// Returns the match semantics used by these patterns. - pub fn match_kind(&self) -> &MatchKind { - &self.kind - } - - /// Return the pattern with the given identifier. 
If such a pattern does - /// not exist, then this panics. - pub fn get(&self, id: PatternID) -> Pattern<'_> { - Pattern(&self.by_id[id as usize]) - } - - /// Return the pattern with the given identifier without performing bounds - /// checks. - /// - /// # Safety - /// - /// Callers must ensure that a pattern with the given identifier exists - /// before using this method. - #[cfg(target_arch = "x86_64")] - pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> { - Pattern(self.by_id.get_unchecked(id as usize)) - } - - /// Return an iterator over all the patterns in this collection, in the - /// order in which they should be matched. - /// - /// Specifically, in a naive multi-pattern matcher, the following is - /// guaranteed to satisfy the match semantics of this collection of - /// patterns: - /// - /// ```ignore - /// for i in 0..haystack.len(): - /// for p in patterns.iter(): - /// if haystack[i..].starts_with(p.bytes()): - /// return Match(p.id(), i, i + p.bytes().len()) - /// ``` - /// - /// Namely, among the patterns in a collection, if they are matched in - /// the order provided by this iterator, then the result is guaranteed - /// to satisfy the correct match semantics. (Either leftmost-first or - /// leftmost-longest.) - pub fn iter(&self) -> PatternIter<'_> { - PatternIter { patterns: self, i: 0 } - } -} - -/// An iterator over the patterns in the `Patterns` collection. -/// -/// The order of the patterns provided by this iterator is consistent with the -/// match semantics of the originating collection of patterns. -/// -/// The lifetime `'p` corresponds to the lifetime of the collection of patterns -/// this is iterating over. 
-#[derive(Debug)] -pub struct PatternIter<'p> { - patterns: &'p Patterns, - i: usize, -} - -impl<'p> Iterator for PatternIter<'p> { - type Item = (PatternID, Pattern<'p>); - - fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> { - if self.i >= self.patterns.len() { - return None; - } - let id = self.patterns.order[self.i]; - let p = self.patterns.get(id); - self.i += 1; - Some((id, p)) - } -} - -/// A pattern that is used in packed searching. -#[derive(Clone)] -pub struct Pattern<'a>(&'a [u8]); - -impl<'a> fmt::Debug for Pattern<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Pattern") - .field("lit", &String::from_utf8_lossy(&self.0)) - .finish() - } -} - -impl<'p> Pattern<'p> { - /// Returns the length of this pattern, in bytes. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Returns the bytes of this pattern. - pub fn bytes(&self) -> &[u8] { - &self.0 - } - - /// Returns the first `len` low nybbles from this pattern. If this pattern - /// is shorter than `len`, then this panics. - #[cfg(target_arch = "x86_64")] - pub fn low_nybbles(&self, len: usize) -> Vec { - let mut nybs = vec![]; - for &b in self.bytes().iter().take(len) { - nybs.push(b & 0xF); - } - nybs - } - - /// Returns true if this pattern is a prefix of the given bytes. - #[inline(always)] - pub fn is_prefix(&self, bytes: &[u8]) -> bool { - self.len() <= bytes.len() && self.equals(&bytes[..self.len()]) - } - - /// Returns true if and only if this pattern equals the given bytes. - #[inline(always)] - pub fn equals(&self, bytes: &[u8]) -> bool { - // Why not just use memcmp for this? Well, memcmp requires calling out - // to libc, and this routine is called in fairly hot code paths. Other - // than just calling out to libc, it also seems to result in worse - // codegen. By rolling our own memcpy in pure Rust, it seems to appear - // more friendly to the optimizer. - // - // This results in an improvement in just about every benchmark. 
Some - // smaller than others, but in some cases, up to 30% faster. - - if self.len() != bytes.len() { - return false; - } - if self.len() < 8 { - for (&b1, &b2) in self.bytes().iter().zip(bytes) { - if b1 != b2 { - return false; - } - } - return true; - } - // When we have 8 or more bytes to compare, then proceed in chunks of - // 8 at a time using unaligned loads. - let mut p1 = self.bytes().as_ptr(); - let mut p2 = bytes.as_ptr(); - let p1end = self.bytes()[self.len() - 8..].as_ptr(); - let p2end = bytes[bytes.len() - 8..].as_ptr(); - // SAFETY: Via the conditional above, we know that both `p1` and `p2` - // have the same length, so `p1 < p1end` implies that `p2 < p2end`. - // Thus, derefencing both `p1` and `p2` in the loop below is safe. - // - // Moreover, we set `p1end` and `p2end` to be 8 bytes before the actual - // end of of `p1` and `p2`. Thus, the final dereference outside of the - // loop is guaranteed to be valid. - // - // Finally, we needn't worry about 64-bit alignment here, since we - // do unaligned loads. - unsafe { - while p1 < p1end { - let v1 = (p1 as *const u64).read_unaligned(); - let v2 = (p2 as *const u64).read_unaligned(); - if v1 != v2 { - return false; - } - p1 = p1.add(8); - p2 = p2.add(8); - } - let v1 = (p1end as *const u64).read_unaligned(); - let v2 = (p2end as *const u64).read_unaligned(); - v1 == v2 - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/rabinkarp.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/rabinkarp.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/rabinkarp.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/rabinkarp.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,185 +0,0 @@ -use std::mem; - -use crate::packed::pattern::{PatternID, Patterns}; -use crate::Match; - -/// The type of the rolling hash used in the Rabin-Karp algorithm. 
-type Hash = usize; - -/// The number of buckets to store our patterns in. We don't want this to be -/// too big in order to avoid wasting memory, but we don't want it to be too -/// small either to avoid spending too much time confirming literals. -/// -/// The number of buckets MUST be a power of two. Otherwise, determining the -/// bucket from a hash will slow down the code considerably. Using a power -/// of two means `hash % NUM_BUCKETS` can compile down to a simple `and` -/// instruction. -const NUM_BUCKETS: usize = 64; - -/// An implementation of the Rabin-Karp algorithm. The main idea of this -/// algorithm is to maintain a rolling hash as it moves through the input, and -/// then check whether that hash corresponds to the same hash for any of the -/// patterns we're looking for. -/// -/// A draw back of naively scaling Rabin-Karp to multiple patterns is that -/// it requires all of the patterns to be the same length, which in turn -/// corresponds to the number of bytes to hash. We adapt this to work for -/// multiple patterns of varying size by fixing the number of bytes to hash -/// to be the length of the smallest pattern. We also split the patterns into -/// several buckets to hopefully make the confirmation step faster. -/// -/// Wikipedia has a decent explanation, if a bit heavy on the theory: -/// https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm -/// -/// But ESMAJ provides something a bit more concrete: -/// https://www-igm.univ-mlv.fr/~lecroq/string/node5.html -#[derive(Clone, Debug)] -pub struct RabinKarp { - /// The order of patterns in each bucket is significant. Namely, they are - /// arranged such that the first one to match is the correct match. This - /// may not necessarily correspond to the order provided by the caller. - /// For example, if leftmost-longest semantics are used, then the patterns - /// are sorted by their length in descending order. 
If leftmost-first - /// semantics are used, then the patterns are sorted by their pattern ID - /// in ascending order (which corresponds to the caller's order). - buckets: Vec>, - /// The length of the hashing window. Generally, this corresponds to the - /// length of the smallest pattern. - hash_len: usize, - /// The factor to subtract out of a hash before updating it with a new - /// byte. - hash_2pow: usize, - /// The maximum identifier of a pattern. This is used as a sanity check - /// to ensure that the patterns provided by the caller are the same as - /// the patterns that were used to compile the matcher. This sanity check - /// possibly permits safely eliminating bounds checks regardless of what - /// patterns are provided by the caller. - /// - /// (Currently, we don't use this to elide bounds checks since it doesn't - /// result in a measurable performance improvement, but we do use it for - /// better failure modes.) - max_pattern_id: PatternID, -} - -impl RabinKarp { - /// Compile a new Rabin-Karp matcher from the patterns given. - /// - /// This panics if any of the patterns in the collection are empty, or if - /// the collection is itself empty. - pub fn new(patterns: &Patterns) -> RabinKarp { - assert!(patterns.len() >= 1); - let hash_len = patterns.minimum_len(); - assert!(hash_len >= 1); - - let mut hash_2pow = 1usize; - for _ in 1..hash_len { - hash_2pow = hash_2pow.wrapping_shl(1); - } - - let mut rk = RabinKarp { - buckets: vec![vec![]; NUM_BUCKETS], - hash_len, - hash_2pow, - max_pattern_id: patterns.max_pattern_id(), - }; - for (id, pat) in patterns.iter() { - let hash = rk.hash(&pat.bytes()[..rk.hash_len]); - let bucket = hash % NUM_BUCKETS; - rk.buckets[bucket].push((hash, id)); - } - rk - } - - /// Return the first matching pattern in the given haystack, begining the - /// search at `at`. 
- pub fn find_at( - &self, - patterns: &Patterns, - haystack: &[u8], - mut at: usize, - ) -> Option { - assert_eq!(NUM_BUCKETS, self.buckets.len()); - assert_eq!( - self.max_pattern_id, - patterns.max_pattern_id(), - "Rabin-Karp must be called with same patterns it was built with", - ); - - if at + self.hash_len > haystack.len() { - return None; - } - let mut hash = self.hash(&haystack[at..at + self.hash_len]); - loop { - let bucket = &self.buckets[hash % NUM_BUCKETS]; - for &(phash, pid) in bucket { - if phash == hash { - if let Some(c) = self.verify(patterns, pid, haystack, at) { - return Some(c); - } - } - } - if at + self.hash_len >= haystack.len() { - return None; - } - hash = self.update_hash( - hash, - haystack[at], - haystack[at + self.hash_len], - ); - at += 1; - } - } - - /// Returns the approximate total amount of heap used by this searcher, in - /// units of bytes. - pub fn heap_bytes(&self) -> usize { - let num_patterns = self.max_pattern_id as usize + 1; - self.buckets.len() * mem::size_of::>() - + num_patterns * mem::size_of::<(Hash, PatternID)>() - } - - /// Verify whether the pattern with the given id matches at - /// `haystack[at..]`. - /// - /// We tag this function as `cold` because it helps improve codegen. - /// Intuitively, it would seem like inlining it would be better. However, - /// the only time this is called and a match is not found is when there - /// there is a hash collision, or when a prefix of a pattern matches but - /// the entire pattern doesn't match. This is hopefully fairly rare, and - /// if it does occur a lot, it's going to be slow no matter what we do. - #[cold] - fn verify( - &self, - patterns: &Patterns, - id: PatternID, - haystack: &[u8], - at: usize, - ) -> Option { - let pat = patterns.get(id); - if pat.is_prefix(&haystack[at..]) { - Some(Match::from_span(id as usize, at, at + pat.len())) - } else { - None - } - } - - /// Hash the given bytes. 
- fn hash(&self, bytes: &[u8]) -> Hash { - assert_eq!(self.hash_len, bytes.len()); - - let mut hash = 0usize; - for &b in bytes { - hash = hash.wrapping_shl(1).wrapping_add(b as usize); - } - hash - } - - /// Update the hash given based on removing `old_byte` at the beginning - /// of some byte string, and appending `new_byte` to the end of that same - /// byte string. - fn update_hash(&self, prev: Hash, old_byte: u8, new_byte: u8) -> Hash { - prev.wrapping_sub((old_byte as usize).wrapping_mul(self.hash_2pow)) - .wrapping_shl(1) - .wrapping_add(new_byte as usize) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/compile.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/compile.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/compile.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/compile.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,414 +0,0 @@ -// See the README in this directory for an explanation of the Teddy algorithm. - -use std::cmp; -use std::collections::BTreeMap; -use std::fmt; - -use crate::packed::pattern::{PatternID, Patterns}; -use crate::packed::teddy::Teddy; - -/// A builder for constructing a Teddy matcher. -/// -/// The builder primarily permits fine grained configuration of the Teddy -/// matcher. Most options are made only available for testing/benchmarking -/// purposes. In reality, options are automatically determined by the nature -/// and number of patterns given to the builder. -#[derive(Clone, Debug)] -pub struct Builder { - /// When none, this is automatically determined. Otherwise, `false` means - /// slim Teddy is used (8 buckets) and `true` means fat Teddy is used - /// (16 buckets). Fat Teddy requires AVX2, so if that CPU feature isn't - /// available and Fat Teddy was requested, no matcher will be built. 
- fat: Option, - /// When none, this is automatically determined. Otherwise, `false` means - /// that 128-bit vectors will be used (up to SSSE3 instructions) where as - /// `true` means that 256-bit vectors will be used. As with `fat`, if - /// 256-bit vectors are requested and they aren't available, then a - /// searcher will not be built. - avx: Option, -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -impl Builder { - /// Create a new builder for configuring a Teddy matcher. - pub fn new() -> Builder { - Builder { fat: None, avx: None } - } - - /// Build a matcher for the set of patterns given. If a matcher could not - /// be built, then `None` is returned. - /// - /// Generally, a matcher isn't built if the necessary CPU features aren't - /// available, an unsupported target or if the searcher is believed to be - /// slower than standard techniques (i.e., if there are too many literals). - pub fn build(&self, patterns: &Patterns) -> Option { - self.build_imp(patterns) - } - - /// Require the use of Fat (true) or Slim (false) Teddy. Fat Teddy uses - /// 16 buckets where as Slim Teddy uses 8 buckets. More buckets are useful - /// for a larger set of literals. - /// - /// `None` is the default, which results in an automatic selection based - /// on the number of literals and available CPU features. - pub fn fat(&mut self, yes: Option) -> &mut Builder { - self.fat = yes; - self - } - - /// Request the use of 256-bit vectors (true) or 128-bit vectors (false). - /// Generally, a larger vector size is better since it either permits - /// matching more patterns or matching more bytes in the haystack at once. - /// - /// `None` is the default, which results in an automatic selection based on - /// the number of literals and available CPU features. 
- pub fn avx(&mut self, yes: Option) -> &mut Builder { - self.avx = yes; - self - } - - fn build_imp(&self, patterns: &Patterns) -> Option { - use crate::packed::teddy::runtime; - - // Most of the logic here is just about selecting the optimal settings, - // or perhaps even rejecting construction altogether. The choices - // we have are: fat (avx only) or not, ssse3 or avx2, and how many - // patterns we allow ourselves to search. Additionally, for testing - // and benchmarking, we permit callers to try to "force" a setting, - // and if the setting isn't allowed (e.g., forcing AVX when AVX isn't - // available), then we bail and return nothing. - - if patterns.len() > 64 { - return None; - } - let has_ssse3 = is_x86_feature_detected!("ssse3"); - let has_avx = is_x86_feature_detected!("avx2"); - let avx = if self.avx == Some(true) { - if !has_avx { - return None; - } - true - } else if self.avx == Some(false) { - if !has_ssse3 { - return None; - } - false - } else if !has_ssse3 && !has_avx { - return None; - } else { - has_avx - }; - let fat = match self.fat { - None => avx && patterns.len() > 32, - Some(false) => false, - Some(true) if !avx => return None, - Some(true) => true, - }; - - let mut compiler = Compiler::new(patterns, fat); - compiler.compile(); - let Compiler { buckets, masks, .. } = compiler; - // SAFETY: It is required that the builder only produce Teddy matchers - // that are allowed to run on the current CPU, since we later assume - // that the presence of (for example) TeddySlim1Mask256 means it is - // safe to call functions marked with the `avx2` target feature. 
- match (masks.len(), avx, fat) { - (1, false, _) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim1Mask128( - runtime::TeddySlim1Mask128 { - mask1: runtime::Mask128::new(masks[0]), - }, - ), - }), - (1, true, false) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim1Mask256( - runtime::TeddySlim1Mask256 { - mask1: runtime::Mask256::new(masks[0]), - }, - ), - }), - (1, true, true) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddyFat1Mask256( - runtime::TeddyFat1Mask256 { - mask1: runtime::Mask256::new(masks[0]), - }, - ), - }), - (2, false, _) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim2Mask128( - runtime::TeddySlim2Mask128 { - mask1: runtime::Mask128::new(masks[0]), - mask2: runtime::Mask128::new(masks[1]), - }, - ), - }), - (2, true, false) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim2Mask256( - runtime::TeddySlim2Mask256 { - mask1: runtime::Mask256::new(masks[0]), - mask2: runtime::Mask256::new(masks[1]), - }, - ), - }), - (2, true, true) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddyFat2Mask256( - runtime::TeddyFat2Mask256 { - mask1: runtime::Mask256::new(masks[0]), - mask2: runtime::Mask256::new(masks[1]), - }, - ), - }), - (3, false, _) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim3Mask128( - runtime::TeddySlim3Mask128 { - mask1: runtime::Mask128::new(masks[0]), - mask2: runtime::Mask128::new(masks[1]), - mask3: runtime::Mask128::new(masks[2]), - }, - ), - }), - (3, true, false) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddySlim3Mask256( - runtime::TeddySlim3Mask256 { - mask1: runtime::Mask256::new(masks[0]), - mask2: 
runtime::Mask256::new(masks[1]), - mask3: runtime::Mask256::new(masks[2]), - }, - ), - }), - (3, true, true) => Some(Teddy { - buckets, - max_pattern_id: patterns.max_pattern_id(), - exec: runtime::Exec::TeddyFat3Mask256( - runtime::TeddyFat3Mask256 { - mask1: runtime::Mask256::new(masks[0]), - mask2: runtime::Mask256::new(masks[1]), - mask3: runtime::Mask256::new(masks[2]), - }, - ), - }), - _ => unreachable!(), - } - } -} - -/// A compiler is in charge of allocating patterns into buckets and generating -/// the masks necessary for searching. -#[derive(Clone)] -struct Compiler<'p> { - patterns: &'p Patterns, - buckets: Vec>, - masks: Vec, -} - -impl<'p> Compiler<'p> { - /// Create a new Teddy compiler for the given patterns. If `fat` is true, - /// then 16 buckets will be used instead of 8. - /// - /// This panics if any of the patterns given are empty. - fn new(patterns: &'p Patterns, fat: bool) -> Compiler<'p> { - let mask_len = cmp::min(3, patterns.minimum_len()); - assert!(1 <= mask_len && mask_len <= 3); - - Compiler { - patterns, - buckets: vec![vec![]; if fat { 16 } else { 8 }], - masks: vec![Mask::default(); mask_len], - } - } - - /// Compile the patterns in this compiler into buckets and masks. - fn compile(&mut self) { - let mut lonibble_to_bucket: BTreeMap, usize> = BTreeMap::new(); - for (id, pattern) in self.patterns.iter() { - // We try to be slightly clever in how we assign patterns into - // buckets. Generally speaking, we want patterns with the same - // prefix to be in the same bucket, since it minimizes the amount - // of time we spend churning through buckets in the verification - // step. - // - // So we could assign patterns with the same N-prefix (where N - // is the size of the mask, which is one of {1, 2, 3}) to the - // same bucket. However, case insensitive searches are fairly - // common, so we'd for example, ideally want to treat `abc` and - // `ABC` as if they shared the same prefix. 
ASCII has the nice - // property that the lower 4 bits of A and a are the same, so we - // therefore group patterns with the same low-nybbe-N-prefix into - // the same bucket. - // - // MOREOVER, this is actually necessary for correctness! In - // particular, by grouping patterns with the same prefix into the - // same bucket, we ensure that we preserve correct leftmost-first - // and leftmost-longest match semantics. In addition to the fact - // that `patterns.iter()` iterates in the correct order, this - // guarantees that all possible ambiguous matches will occur in - // the same bucket. The verification routine could be adjusted to - // support correct leftmost match semantics regardless of bucket - // allocation, but that results in a performance hit. It's much - // nicer to be able to just stop as soon as a match is found. - let lonybs = pattern.low_nybbles(self.masks.len()); - if let Some(&bucket) = lonibble_to_bucket.get(&lonybs) { - self.buckets[bucket].push(id); - } else { - // N.B. We assign buckets in reverse because it shouldn't have - // any influence on performance, but it does make it harder to - // get leftmost match semantics accidentally correct. 
- let bucket = (self.buckets.len() - 1) - - (id as usize % self.buckets.len()); - self.buckets[bucket].push(id); - lonibble_to_bucket.insert(lonybs, bucket); - } - } - for (bucket_index, bucket) in self.buckets.iter().enumerate() { - for &pat_id in bucket { - let pat = self.patterns.get(pat_id); - for (i, mask) in self.masks.iter_mut().enumerate() { - if self.buckets.len() == 8 { - mask.add_slim(bucket_index as u8, pat.bytes()[i]); - } else { - mask.add_fat(bucket_index as u8, pat.bytes()[i]); - } - } - } - } - } -} - -impl<'p> fmt::Debug for Compiler<'p> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut buckets = vec![vec![]; self.buckets.len()]; - for (i, bucket) in self.buckets.iter().enumerate() { - for &patid in bucket { - buckets[i].push(self.patterns.get(patid)); - } - } - f.debug_struct("Compiler") - .field("buckets", &buckets) - .field("masks", &self.masks) - .finish() - } -} - -/// Mask represents the low and high nybble masks that will be used during -/// search. Each mask is 32 bytes wide, although only the first 16 bytes are -/// used for the SSSE3 runtime. -/// -/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set -/// if and only if the corresponding nybble is in the ith bucket. The index of -/// the byte (0-15, inclusive) corresponds to the nybble. -/// -/// Each mask is used as the target of a shuffle, where the indices for the -/// shuffle are taken from the haystack. AND'ing the shuffles for both the -/// low and high masks together also results in 8-bit bitsets, but where bit -/// `i` is set if and only if the correspond *byte* is in the ith bucket. -/// -/// During compilation, masks are just arrays. But during search, these masks -/// are represented as 128-bit or 256-bit vectors. -/// -/// (See the README is this directory for more details.) 
-#[derive(Clone, Copy, Default)] -pub struct Mask { - lo: [u8; 32], - hi: [u8; 32], -} - -impl Mask { - /// Update this mask by adding the given byte to the given bucket. The - /// given bucket must be in the range 0-7. - /// - /// This is for "slim" Teddy, where there are only 8 buckets. - fn add_slim(&mut self, bucket: u8, byte: u8) { - assert!(bucket < 8); - - let byte_lo = (byte & 0xF) as usize; - let byte_hi = ((byte >> 4) & 0xF) as usize; - // When using 256-bit vectors, we need to set this bucket assignment in - // the low and high 128-bit portions of the mask. This allows us to - // process 32 bytes at a time. Namely, AVX2 shuffles operate on each - // of the 128-bit lanes, rather than the full 256-bit vector at once. - self.lo[byte_lo] |= 1 << bucket; - self.lo[byte_lo + 16] |= 1 << bucket; - self.hi[byte_hi] |= 1 << bucket; - self.hi[byte_hi + 16] |= 1 << bucket; - } - - /// Update this mask by adding the given byte to the given bucket. The - /// given bucket must be in the range 0-15. - /// - /// This is for "fat" Teddy, where there are 16 buckets. - fn add_fat(&mut self, bucket: u8, byte: u8) { - assert!(bucket < 16); - - let byte_lo = (byte & 0xF) as usize; - let byte_hi = ((byte >> 4) & 0xF) as usize; - // Unlike slim teddy, fat teddy only works with AVX2. For fat teddy, - // the high 128 bits of our mask correspond to buckets 8-15, while the - // low 128 bits correspond to buckets 0-7. - if bucket < 8 { - self.lo[byte_lo] |= 1 << bucket; - self.hi[byte_hi] |= 1 << bucket; - } else { - self.lo[byte_lo + 16] |= 1 << (bucket % 8); - self.hi[byte_hi + 16] |= 1 << (bucket % 8); - } - } - - /// Return the low 128 bits of the low-nybble mask. - pub fn lo128(&self) -> [u8; 16] { - let mut tmp = [0; 16]; - tmp.copy_from_slice(&self.lo[..16]); - tmp - } - - /// Return the full low-nybble mask. - pub fn lo256(&self) -> [u8; 32] { - self.lo - } - - /// Return the low 128 bits of the high-nybble mask. 
- pub fn hi128(&self) -> [u8; 16] { - let mut tmp = [0; 16]; - tmp.copy_from_slice(&self.hi[..16]); - tmp - } - - /// Return the full high-nybble mask. - pub fn hi256(&self) -> [u8; 32] { - self.hi - } -} - -impl fmt::Debug for Mask { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (mut parts_lo, mut parts_hi) = (vec![], vec![]); - for i in 0..32 { - parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i])); - parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i])); - } - f.debug_struct("Mask") - .field("lo", &parts_lo) - .field("hi", &parts_hi) - .finish() - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/mod.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -#[cfg(target_arch = "x86_64")] -pub use crate::packed::teddy::compile::Builder; -#[cfg(not(target_arch = "x86_64"))] -pub use crate::packed::teddy::fallback::Builder; -#[cfg(not(target_arch = "x86_64"))] -pub use crate::packed::teddy::fallback::Teddy; -#[cfg(target_arch = "x86_64")] -pub use crate::packed::teddy::runtime::Teddy; - -#[cfg(target_arch = "x86_64")] -mod compile; -#[cfg(target_arch = "x86_64")] -mod runtime; - -#[cfg(not(target_arch = "x86_64"))] -mod fallback { - use crate::packed::pattern::Patterns; - use crate::Match; - - #[derive(Clone, Debug, Default)] - pub struct Builder(()); - - impl Builder { - pub fn new() -> Builder { - Builder(()) - } - - pub fn build(&self, _: &Patterns) -> Option { - None - } - - pub fn fat(&mut self, _: Option) -> &mut Builder { - self - } - - pub fn avx(&mut self, _: Option) -> &mut Builder { - self - } - } - - #[derive(Clone, Debug)] - pub struct Teddy(()); - - impl Teddy { - pub fn find_at( - &self, - _: 
&Patterns, - _: &[u8], - _: usize, - ) -> Option { - None - } - - pub fn minimum_len(&self) -> usize { - 0 - } - - pub fn heap_bytes(&self) -> usize { - 0 - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/README.md 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,386 +0,0 @@ -Teddy is a SIMD accelerated multiple substring matching algorithm. The name -and the core ideas in the algorithm were learned from the [Hyperscan][1_u] -project. The implementation in this repository was mostly motivated for use in -accelerating regex searches by searching for small sets of required literals -extracted from the regex. - - -# Background - -The key idea of Teddy is to do *packed* substring matching. In the literature, -packed substring matching is the idea of examining multiple bytes in a haystack -at a time to detect matches. Implementations of, for example, memchr (which -detects matches of a single byte) have been doing this for years. Only -recently, with the introduction of various SIMD instructions, has this been -extended to substring matching. The PCMPESTRI instruction (and its relatives), -for example, implements substring matching in hardware. It is, however, limited -to substrings of length 16 bytes or fewer, but this restriction is fine in a -regex engine, since we rarely care about the performance difference between -searching for a 16 byte literal and a 16 + N literal; 16 is already long -enough. The key downside of the PCMPESTRI instruction, on current (2016) CPUs -at least, is its latency and throughput. 
As a result, it is often faster to -do substring search with a Boyer-Moore (or Two-Way) variant and a well placed -memchr to quickly skip through the haystack. - -There are fewer results from the literature on packed substring matching, -and even fewer for packed multiple substring matching. Ben-Kiki et al. [2] -describes use of PCMPESTRI for substring matching, but is mostly theoretical -and hand-waves performance. There is other theoretical work done by Bille [3] -as well. - -The rest of the work in the field, as far as I'm aware, is by Faro and Kulekci -and is generally focused on multiple pattern search. Their first paper [4a] -introduces the concept of a fingerprint, which is computed for every block of -N bytes in every pattern. The haystack is then scanned N bytes at a time and -a fingerprint is computed in the same way it was computed for blocks in the -patterns. If the fingerprint corresponds to one that was found in a pattern, -then a verification step follows to confirm that one of the substrings with the -corresponding fingerprint actually matches at the current location. Various -implementation tricks are employed to make sure the fingerprint lookup is fast; -typically by truncating the fingerprint. (This may, of course, provoke more -steps in the verification process, so a balance must be struck.) - -The main downside of [4a] is that the minimum substring length is 32 bytes, -presumably because of how the algorithm uses certain SIMD instructions. This -essentially makes it useless for general purpose regex matching, where a small -number of short patterns is far more likely. - -Faro and Kulekci published another paper [4b] that is conceptually very similar -to [4a]. The key difference is that it uses the CRC32 instruction (introduced -as part of SSE 4.2) to compute fingerprint values. This also enables the -algorithm to work effectively on substrings as short as 7 bytes with 4 byte -windows. 7 bytes is unfortunately still too long. 
The window could be -technically shrunk to 2 bytes, thereby reducing minimum length to 3, but the -small window size ends up negating most performance benefits—and it's likely -the common case in a general purpose regex engine. - -Faro and Kulekci also published [4c] that appears to be intended as a -replacement to using PCMPESTRI. In particular, it is specifically motivated by -the high throughput/latency time of PCMPESTRI and therefore chooses other SIMD -instructions that are faster. While this approach works for short substrings, -I personally couldn't see a way to generalize it to multiple substring search. - -Faro and Kulekci have another paper [4d] that I haven't been able to read -because it is behind a paywall. - - -# Teddy - -Finally, we get to Teddy. If the above literature review is complete, then it -appears that Teddy is a novel algorithm. More than that, in my experience, it -completely blows away the competition for short substrings, which is exactly -what we want in a general purpose regex engine. Again, the algorithm appears -to be developed by the authors of [Hyperscan][1_u]. Hyperscan was open sourced -late 2015, and no earlier history could be found. Therefore, tracking the exact -provenance of the algorithm with respect to the published literature seems -difficult. - -At a high level, Teddy works somewhat similarly to the fingerprint algorithms -published by Faro and Kulekci, but Teddy does it in a way that scales a bit -better. Namely: - -1. Teddy's core algorithm scans the haystack in 16 (for SSE, or 32 for AVX) - byte chunks. 16 (or 32) is significant because it corresponds to the number - of bytes in a SIMD vector. -2. Bitwise operations are performed on each chunk to discover if any region of - it matches a set of precomputed fingerprints from the patterns. If there are - matches, then a verification step is performed. In this implementation, our - verification step is naive. This can be improved upon. 
- -The details to make this work are quite clever. First, we must choose how to -pick our fingerprints. In Hyperscan's implementation, I *believe* they use the -last N bytes of each substring, where N must be at least the minimum length of -any substring in the set being searched. In this implementation, we use the -first N bytes of each substring. (The tradeoffs between these choices aren't -yet clear to me.) We then must figure out how to quickly test whether an -occurrence of any fingerprint from the set of patterns appears in a 16 byte -block from the haystack. To keep things simple, let's assume N = 1 and examine -some examples to motivate the approach. Here are our patterns: - -```ignore -foo -bar -baz -``` - -The corresponding fingerprints, for N = 1, are `f`, `b` and `b`. Now let's set -our 16 byte block to: - -```ignore -bat cat foo bump -xxxxxxxxxxxxxxxx -``` - -To cut to the chase, Teddy works by using bitsets. In particular, Teddy creates -a mask that allows us to quickly compute membership of a fingerprint in a 16 -byte block that also tells which pattern the fingerprint corresponds to. In -this case, our fingerprint is a single byte, so an appropriate abstraction is -a map from a single byte to a list of patterns that contain that fingerprint: - -```ignore -f |--> foo -b |--> bar, baz -``` - -Now, all we need to do is figure out how to represent this map in vector space -and use normal SIMD operations to perform a lookup. The first simplification -we can make is to represent our patterns as bit fields occupying a single -byte. This is important, because a single SIMD vector can store 16 bytes. - -```ignore -f |--> 00000001 -b |--> 00000010, 00000100 -``` - -How do we perform lookup though? It turns out that SSSE3 introduced a very cool -instruction called PSHUFB. The instruction takes two SIMD vectors, `A` and `B`, -and returns a third vector `C`. All vectors are treated as 16 8-bit integers. -`C` is formed by `C[i] = A[B[i]]`. 
(This is a bit of a simplification, but true -for the purposes of this algorithm. For full details, see [Intel's Intrinsics -Guide][5_u].) This essentially lets us use the values in `B` to lookup values -in `A`. - -If we could somehow cause `B` to contain our 16 byte block from the haystack, -and if `A` could contain our bitmasks, then we'd end up with something like -this for `A`: - -```ignore - 0x00 0x01 ... 0x62 ... 0x66 ... 0xFF -A = 0 0 00000110 00000001 0 -``` - -And if `B` contains our window from our haystack, we could use shuffle to take -the values from `B` and use them to look up our bitsets in `A`. But of course, -we can't do this because `A` in the above example contains 256 bytes, which -is much larger than the size of a SIMD vector. - -Nybbles to the rescue! A nybble is 4 bits. Instead of one mask to hold all of -our bitsets, we can use two masks, where one mask corresponds to the lower four -bits of our fingerprint and the other mask corresponds to the upper four bits. -So our map now looks like: - -```ignore -'f' & 0xF = 0x6 |--> 00000001 -'f' >> 4 = 0x6 |--> 00000111 -'b' & 0xF = 0x2 |--> 00000110 -'b' >> 4 = 0x6 |--> 00000111 -``` - -Notice that the bitsets for each nybble correspond to the union of all -fingerprints that contain that nybble. For example, both `f` and `b` have the -same upper 4 bits but differ on the lower 4 bits. Putting this together, we -have `A0`, `A1` and `B`, where `A0` is our mask for the lower nybble, `A1` is -our mask for the upper nybble and `B` is our 16 byte block from the haystack: - -```ignore - 0x00 0x01 0x02 0x03 ... 0x06 ... 0xF -A0 = 0 0 00000110 0 00000001 0 -A1 = 0 0 0 0 00000111 0 -B = b a t _ t p -B = 0x62 0x61 0x74 0x20 0x74 0x70 -``` - -But of course, we can't use `B` with `PSHUFB` yet, since its values are 8 bits, -and we need indexes that are at most 4 bits (corresponding to one of 16 -values). We can apply the same transformation to split `B` into lower and upper -nybbles as we did `A`. 
As before, `B0` corresponds to the lower nybbles and -`B1` corresponds to the upper nybbles: - -```ignore - b a t _ c a t _ f o o _ b u m p -B0 = 0x2 0x1 0x4 0x0 0x3 0x1 0x4 0x0 0x6 0xF 0xF 0x0 0x2 0x5 0xD 0x0 -B1 = 0x6 0x6 0x7 0x2 0x6 0x6 0x7 0x2 0x6 0x6 0x6 0x2 0x6 0x7 0x6 0x7 -``` - -And now we have a nice correspondence. `B0` can index `A0` and `B1` can index -`A1`. Here's what we get when we apply `C0 = PSHUFB(A0, B0)`: - -```ignore - b a ... f o ... p - A0[0x2] A0[0x1] A0[0x6] A0[0xF] A0[0x0] -C0 = 00000110 0 00000001 0 0 -``` - -And `C1 = PSHUFB(A1, B1)`: - -```ignore - b a ... f o ... p - A1[0x6] A1[0x6] A1[0x6] A1[0x6] A1[0x7] -C1 = 00000111 00000111 00000111 00000111 0 -``` - -Notice how neither one of `C0` or `C1` is guaranteed to report fully correct -results all on its own. For example, `C1` claims that `b` is a fingerprint for -the pattern `foo` (since `A1[0x6] = 00000111`), and that `o` is a fingerprint -for all of our patterns. But if we combined `C0` and `C1` with an `AND` -operation: - -```ignore - b a ... f o ... p -C = 00000110 0 00000001 0 0 -``` - -Then we now have that `C[i]` contains a bitset corresponding to the matching -fingerprints in a haystack's 16 byte block, where `i` is the `ith` byte in that -block. - -Once we have that, we can look for the position of the least significant bit -in `C`. (Least significant because we only target `x86_64` here, which is -always little endian. Thus, the least significant bytes correspond to bytes -in our haystack at a lower address.) That position, modulo `8`, gives us -the pattern that the fingerprint matches. That position, integer divided by -`8`, also gives us the byte offset that the fingerprint occurs in inside the -16 byte haystack block. Using those two pieces of information, we can run a -verification procedure that tries to match all substrings containing that -fingerprint at that position in the haystack. 
- - -# Implementation notes - -The problem with the algorithm as described above is that it uses a single byte -for a fingerprint. This will work well if the fingerprints are rare in the -haystack (e.g., capital letters or special characters in normal English text), -but if the fingerprints are common, you'll wind up spending too much time in -the verification step, which effectively negates the performance benefits of -scanning 16 bytes at a time. Remember, the key to the performance of this -algorithm is to do as little work as possible per 16 (or 32) bytes. - -This algorithm can be extrapolated in a relatively straight-forward way to use -larger fingerprints. That is, instead of a single byte prefix, we might use a -two or three byte prefix. The implementation here implements N = {1, 2, 3} -and always picks the largest N possible. The rationale is that the bigger the -fingerprint, the fewer verification steps we'll do. Of course, if N is too -large, then we'll end up doing too much on each step. - -The way to extend it is: - -1. Add a mask for each byte in the fingerprint. (Remember that each mask is - composed of two SIMD vectors.) This results in a value of `C` for each byte - in the fingerprint while searching. -2. When testing each 16 (or 32) byte block, each value of `C` must be shifted - so that they are aligned. Once aligned, they should all be `AND`'d together. - This will give you only the bitsets corresponding to the full match of the - fingerprint. To do this, one needs to save the last byte (for N=2) or last - two bytes (for N=3) from the previous iteration, and then line them up with - the first one or two bytes of the next iteration. - -## Verification - -Verification generally follows the procedure outlined above. The tricky parts -are in the right formulation of operations to get our bits out of our vectors. 
-We have a limited set of operations available to us on SIMD vectors as 128-bit -or 256-bit numbers, so we wind up needing to rip out 2 (or 4) 64-bit integers -from our vectors, and then run our verification step on each of those. The -verification step looks at the least significant bit set, and from its -position, we can derive the byte offset and bucket. (Again, as described -above.) Once we know the bucket, we do a fairly naive exhaustive search for -every literal in that bucket. (Hyperscan is a bit smarter here and uses a hash -table, but I haven't had time to thoroughly explore that. A few initial -half-hearted attempts resulted in worse performance.) - -## AVX - -The AVX version of Teddy extrapolates almost perfectly from the SSE version. -The only hickup is that PALIGNR is used to align chunks in the 16-bit version, -and there is no equivalent instruction in AVX. AVX does have VPALIGNR, but it -only works within 128-bit lanes. So there's a bit of tomfoolery to get around -this by shuffling the vectors before calling VPALIGNR. - -The only other aspect to AVX is that since our masks are still fundamentally -16-bytes (0x0-0xF), they are duplicated to 32-bytes, so that they can apply to -32-byte chunks. - -## Fat Teddy - -In the version of Teddy described above, 8 buckets are used to group patterns -that we want to search for. However, when AVX is available, we can extend the -number of buckets to 16 by permitting each byte in our masks to use 16-bits -instead of 8-bits to represent the buckets it belongs to. (This variant is also -in Hyperscan.) However, what we give up is the ability to scan 32 bytes at a -time, even though we're using AVX. Instead, we have to scan 16 bytes at a time. -What we gain, though, is (hopefully) less work in our verification routine. -It patterns are more spread out across more buckets, then there should overall -be fewer false positives. 
In general, Fat Teddy permits us to grow our capacity -a bit and search for more literals before Teddy gets overwhelmed. - -The tricky part of Fat Teddy is in how we adjust our masks and our verification -procedure. For the masks, we simply represent the first 8 buckets in each of -the low 16 bytes, and then the second 8 buckets in each of the high 16 bytes. -Then, in the search loop, instead of loading 32 bytes from the haystack, we -load the same 16 bytes from the haystack into both the low and high 16 byte -portions of our 256-bit vector. So for example, a mask might look like this: - - bits: 00100001 00000000 ... 11000000 00000000 00000001 ... 00000000 - byte: 31 30 16 15 14 0 - offset: 15 14 0 15 14 0 - buckets: 8-15 8-15 8-15 0-7 0-7 0-7 - -Where `byte` is the position in the vector (higher numbers corresponding to -more significant bits), `offset` is the corresponding position in the haystack -chunk, and `buckets` corresponds to the bucket assignments for that particular -byte. - -In particular, notice that the bucket assignments for offset `0` are spread -out between bytes `0` and `16`. This works well for the chunk-by-chunk search -procedure, but verification really wants to process all bucket assignments for -each offset at once. Otherwise, we might wind up finding a match at offset -`1` in one the first 8 buckets, when we really should have reported a match -at offset `0` in one of the second 8 buckets. (Because we want the leftmost -match.) - -Thus, for verification, we rearrange the above vector such that it is a -sequence of 16-bit integers, where the least significant 16-bit integer -corresponds to all of the bucket assignments for offset `0`. So with the -above vector, the least significant 16-bit integer would be - - 11000000 000000 - -which was taken from bytes `16` and `0`. Then the verification step pretty much -runs as described, except with 16 buckets instead of 8. 
- - -# References - -- **[1]** [Hyperscan on GitHub](https://github.com/intel/hyperscan), - [webpage](https://www.hyperscan.io/) -- **[2a]** Ben-Kiki, O., Bille, P., Breslauer, D., Gasieniec, L., Grossi, R., - & Weimann, O. (2011). - _Optimal packed string matching_. - In LIPIcs-Leibniz International Proceedings in Informatics (Vol. 13). - Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik. - DOI: 10.4230/LIPIcs.FSTTCS.2011.423. - [PDF](https://drops.dagstuhl.de/opus/volltexte/2011/3355/pdf/37.pdf). -- **[2b]** Ben-Kiki, O., Bille, P., Breslauer, D., Ga̧sieniec, L., Grossi, R., - & Weimann, O. (2014). - _Towards optimal packed string matching_. - Theoretical Computer Science, 525, 111-129. - DOI: 10.1016/j.tcs.2013.06.013. - [PDF](https://www.cs.haifa.ac.il/~oren/Publications/bpsm.pdf). -- **[3]** Bille, P. (2011). - _Fast searching in packed strings_. - Journal of Discrete Algorithms, 9(1), 49-56. - DOI: 10.1016/j.jda.2010.09.003. - [PDF](https://www.sciencedirect.com/science/article/pii/S1570866710000353). -- **[4a]** Faro, S., & Külekci, M. O. (2012, October). - _Fast multiple string matching using streaming SIMD extensions technology_. - In String Processing and Information Retrieval (pp. 217-228). - Springer Berlin Heidelberg. - DOI: 10.1007/978-3-642-34109-0_23. - [PDF](https://www.dmi.unict.it/faro/papers/conference/faro32.pdf). -- **[4b]** Faro, S., & Külekci, M. O. (2013, September). - _Towards a Very Fast Multiple String Matching Algorithm for Short Patterns_. - In Stringology (pp. 78-91). - [PDF](https://www.dmi.unict.it/faro/papers/conference/faro36.pdf). -- **[4c]** Faro, S., & Külekci, M. O. (2013, January). - _Fast packed string matching for short patterns_. - In Proceedings of the Meeting on Algorithm Engineering & Expermiments - (pp. 113-121). - Society for Industrial and Applied Mathematics. - [PDF](https://arxiv.org/pdf/1209.6449.pdf). -- **[4d]** Faro, S., & Külekci, M. O. (2014). - _Fast and flexible packed string matching_. 
- Journal of Discrete Algorithms, 28, 61-72. - DOI: 10.1016/j.jda.2014.07.003. - -[1_u]: https://github.com/intel/hyperscan -[5_u]: https://software.intel.com/sites/landingpage/IntrinsicsGuide diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/runtime.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/runtime.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/runtime.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/teddy/runtime.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1204 +0,0 @@ -// See the README in this directory for an explanation of the Teddy algorithm. -// It is strongly recommended to peruse the README before trying to grok this -// code, as its use of SIMD is pretty opaque, although I tried to add comments -// where appropriate. -// -// Moreover, while there is a lot of code in this file, most of it is -// repeated variants of the same thing. Specifically, there are three Teddy -// variants: Slim 128-bit Teddy (8 buckets), Slim 256-bit Teddy (8 buckets) -// and Fat 256-bit Teddy (16 buckets). For each variant, there are three -// implementations, corresponding to mask lengths of 1, 2 and 3. Bringing it to -// a total of nine variants. Each one is structured roughly the same: -// -// while at <= len(haystack) - CHUNK_SIZE: -// let candidate = find_candidate_in_chunk(haystack, at) -// if not all zeroes(candidate): -// if match = verify(haystack, at, candidate): -// return match -// -// For the most part, this remains unchanged. The parts that vary are the -// verification routine (for slim vs fat Teddy) and the candidate extraction -// (based on the number of masks). -// -// In the code below, a "candidate" corresponds to a single vector with 8-bit -// lanes. 
Each lane is itself an 8-bit bitset, where the ith bit is set in the -// jth lane if and only if the byte occurring at position `j` is in the -// bucket `i` (where the `j`th position is the position in the current window -// of the haystack, which is always 16 or 32 bytes). Note to be careful here: -// the ith bit and the jth lane correspond to the least significant bits of the -// vector. So when visualizing how the current window of bytes is stored in a -// vector, you often need to flip it around. For example, the text `abcd` in a -// 4-byte vector would look like this: -// -// 01100100 01100011 01100010 01100001 -// d c b a -// -// When the mask length is 1, then finding the candidate is pretty straight -// forward: you just apply the shuffle indices (from the haystack window) to -// the masks, and then AND them together, as described in the README. But for -// masks of length 2 and 3, you need to keep a little state. Specifically, -// you need to store the final 1 (for mask length 2) or 2 (for mask length 3) -// bytes of the candidate for use when searching the next window. This is for -// handling matches that span two windows. -// -// With respect to the repeated code, it would likely be possible to reduce -// the number of copies of code below using polymorphism, but I find this -// formulation clearer instead of needing to reason through generics. However, -// I admit, there may be a simpler generic construction that I'm missing. -// -// All variants are fairly heavily tested in src/packed/tests.rs. - -use std::arch::x86_64::*; -use std::mem; - -use crate::packed::pattern::{PatternID, Patterns}; -use crate::packed::teddy::compile; -use crate::packed::vector::*; -use crate::Match; - -/// The Teddy runtime. -/// -/// A Teddy runtime can be used to quickly search for occurrences of one or -/// more patterns. 
While it does not scale to an arbitrary number of patterns -/// like Aho-Corasick, it does find occurrences for a small set of patterns -/// much more quickly than Aho-Corasick. -/// -/// Teddy cannot run on small haystacks below a certain size, which is -/// dependent on the type of matcher used. This size can be queried via the -/// `minimum_len` method. Violating this will result in a panic. -/// -/// Finally, when callers use a Teddy runtime, they must provide precisely the -/// patterns used to construct the Teddy matcher. Violating this will result -/// in either a panic or incorrect results, but will never sacrifice memory -/// safety. -#[derive(Clone, Debug)] -pub struct Teddy { - /// The allocation of patterns in buckets. This only contains the IDs of - /// patterns. In order to do full verification, callers must provide the - /// actual patterns when using Teddy. - pub buckets: Vec>, - /// The maximum identifier of a pattern. This is used as a sanity check to - /// ensure that the patterns provided by the caller are the same as the - /// patterns that were used to compile the matcher. This sanity check - /// permits safely eliminating bounds checks regardless of what patterns - /// are provided by the caller. - /// - /// Note that users of the aho-corasick crate cannot get this wrong. Only - /// code internal to this crate can get it wrong, since neither `Patterns` - /// type nor the Teddy runtime are public API items. - pub max_pattern_id: PatternID, - /// The actual runtime to use. - pub exec: Exec, -} - -impl Teddy { - /// Return the first occurrence of a match in the given haystack after or - /// starting at `at`. - /// - /// The patterns provided must be precisely the same patterns given to the - /// Teddy builder, otherwise this may panic or produce incorrect results. - /// - /// All matches are consistent with the match semantics (leftmost-first or - /// leftmost-longest) set on `pats`. 
- pub fn find_at( - &self, - pats: &Patterns, - haystack: &[u8], - at: usize, - ) -> Option { - // This assert is a bit subtle, but it's an important guarantee. - // Namely, if the maximum pattern ID seen by Teddy is the same as the - // one in the patterns given, then we are guaranteed that every pattern - // ID in all Teddy buckets are valid indices into `pats`. While this - // is nominally true, there is no guarantee that callers provide the - // same `pats` to both the Teddy builder and the searcher, which would - // otherwise make `find_at` unsafe to call. But this assert lets us - // keep this routine safe and eliminate an important bounds check in - // verification. - assert_eq!( - self.max_pattern_id, - pats.max_pattern_id(), - "teddy must be called with same patterns it was built with", - ); - // SAFETY: The haystack must have at least a minimum number of bytes - // for Teddy to be able to work. The minimum number varies depending on - // which matcher is used below. If this is violated, then it's possible - // for searching to do out-of-bounds writes. - assert!(haystack[at..].len() >= self.minimum_len()); - // SAFETY: The various Teddy matchers are always safe to call because - // the Teddy builder guarantees that a particular Exec variant is - // built only when it can be run the current CPU. That is, the Teddy - // builder will not produce a Exec::TeddySlim1Mask256 unless AVX2 is - // enabled. That is, our dynamic CPU feature detection is performed - // once in the builder, and we rely on the type system to avoid needing - // to do it again. 
- unsafe { - match self.exec { - Exec::TeddySlim1Mask128(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddySlim1Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddyFat1Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddySlim2Mask128(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddySlim2Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddyFat2Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddySlim3Mask128(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddySlim3Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - Exec::TeddyFat3Mask256(ref e) => { - e.find_at(pats, self, haystack, at) - } - } - } - } - - /// Returns the minimum length of a haystack that must be provided by - /// callers to this Teddy searcher. Providing a haystack shorter than this - /// will result in a panic, but will never violate memory safety. - pub fn minimum_len(&self) -> usize { - // SAFETY: These values must be correct in order to ensure safety. - // The Teddy runtime assumes their haystacks have at least these - // lengths. Violating this will sacrifice memory safety. - match self.exec { - Exec::TeddySlim1Mask128(_) => 16, - Exec::TeddySlim1Mask256(_) => 32, - Exec::TeddyFat1Mask256(_) => 16, - Exec::TeddySlim2Mask128(_) => 17, - Exec::TeddySlim2Mask256(_) => 33, - Exec::TeddyFat2Mask256(_) => 17, - Exec::TeddySlim3Mask128(_) => 18, - Exec::TeddySlim3Mask256(_) => 34, - Exec::TeddyFat3Mask256(_) => 34, - } - } - - /// Returns the approximate total amount of heap used by this searcher, in - /// units of bytes. - pub fn heap_bytes(&self) -> usize { - let num_patterns = self.max_pattern_id as usize + 1; - self.buckets.len() * mem::size_of::>() - + num_patterns * mem::size_of::() - } - - /// Runs the verification routine for Slim 128-bit Teddy. 
- /// - /// The candidate given should be a collection of 8-bit bitsets (one bitset - /// per lane), where the ith bit is set in the jth lane if and only if the - /// byte occurring at `at + j` in `haystack` is in the bucket `i`. - /// - /// This is not safe to call unless the SSSE3 target feature is enabled. - /// The `target_feature` attribute is not applied since this function is - /// always forcefully inlined. - #[inline(always)] - unsafe fn verify128( - &self, - pats: &Patterns, - haystack: &[u8], - at: usize, - cand: __m128i, - ) -> Option { - debug_assert!(!is_all_zeroes128(cand)); - debug_assert_eq!(8, self.buckets.len()); - - // Convert the candidate into 64-bit chunks, and then verify each of - // those chunks. - let parts = unpack64x128(cand); - for (i, &part) in parts.iter().enumerate() { - let pos = at + i * 8; - if let Some(m) = self.verify64(pats, 8, haystack, pos, part) { - return Some(m); - } - } - None - } - - /// Runs the verification routine for Slim 256-bit Teddy. - /// - /// The candidate given should be a collection of 8-bit bitsets (one bitset - /// per lane), where the ith bit is set in the jth lane if and only if the - /// byte occurring at `at + j` in `haystack` is in the bucket `i`. - /// - /// This is not safe to call unless the AVX2 target feature is enabled. - /// The `target_feature` attribute is not applied since this function is - /// always forcefully inlined. - #[inline(always)] - unsafe fn verify256( - &self, - pats: &Patterns, - haystack: &[u8], - at: usize, - cand: __m256i, - ) -> Option { - debug_assert!(!is_all_zeroes256(cand)); - debug_assert_eq!(8, self.buckets.len()); - - // Convert the candidate into 64-bit chunks, and then verify each of - // those chunks. - let parts = unpack64x256(cand); - for (i, &part) in parts.iter().enumerate() { - let pos = at + i * 8; - if let Some(m) = self.verify64(pats, 8, haystack, pos, part) { - return Some(m); - } - } - None - } - - /// Runs the verification routine for Fat 256-bit Teddy. 
- /// - /// The candidate given should be a collection of 8-bit bitsets (one bitset - /// per lane), where the ith bit is set in the jth lane if and only if the - /// byte occurring at `at + (j < 16 ? j : j - 16)` in `haystack` is in the - /// bucket `j < 16 ? i : i + 8`. - /// - /// This is not safe to call unless the AVX2 target feature is enabled. - /// The `target_feature` attribute is not applied since this function is - /// always forcefully inlined. - #[inline(always)] - unsafe fn verify_fat256( - &self, - pats: &Patterns, - haystack: &[u8], - at: usize, - cand: __m256i, - ) -> Option { - debug_assert!(!is_all_zeroes256(cand)); - debug_assert_eq!(16, self.buckets.len()); - - // This is a bit tricky, but we basically want to convert our - // candidate, which looks like this - // - // a31 a30 ... a17 a16 a15 a14 ... a01 a00 - // - // where each a(i) is an 8-bit bitset corresponding to the activated - // buckets, to this - // - // a31 a15 a30 a14 a29 a13 ... a18 a02 a17 a01 a16 a00 - // - // Namely, for Fat Teddy, the high 128-bits of the candidate correspond - // to the same bytes in the haystack in the low 128-bits (so we only - // scan 16 bytes at a time), but are for buckets 8-15 instead of 0-7. - // - // The verification routine wants to look at all potentially matching - // buckets before moving on to the next lane. So for example, both - // a16 and a00 both correspond to the first byte in our window; a00 - // contains buckets 0-7 and a16 contains buckets 8-15. Specifically, - // a16 should be checked before a01. So the transformation shown above - // allows us to use our normal verification procedure with one small - // change: we treat each bitset as 16 bits instead of 8 bits. - - // Swap the 128-bit lanes in the candidate vector. - let swap = _mm256_permute4x64_epi64(cand, 0x4E); - // Interleave the bytes from the low 128-bit lanes, starting with - // cand first. 
- let r1 = _mm256_unpacklo_epi8(cand, swap); - // Interleave the bytes from the high 128-bit lanes, starting with - // cand first. - let r2 = _mm256_unpackhi_epi8(cand, swap); - // Now just take the 2 low 64-bit integers from both r1 and r2. We - // can drop the high 64-bit integers because they are a mirror image - // of the low 64-bit integers. All we care about are the low 128-bit - // lanes of r1 and r2. Combined, they contain all our 16-bit bitsets - // laid out in the desired order, as described above. - let parts = unpacklo64x256(r1, r2); - for (i, &part) in parts.iter().enumerate() { - let pos = at + i * 4; - if let Some(m) = self.verify64(pats, 16, haystack, pos, part) { - return Some(m); - } - } - None - } - - /// Verify whether there are any matches starting at or after `at` in the - /// given `haystack`. The candidate given should correspond to either 8-bit - /// (for 8 buckets) or 16-bit (16 buckets) bitsets. - #[inline(always)] - fn verify64( - &self, - pats: &Patterns, - bucket_count: usize, - haystack: &[u8], - at: usize, - mut cand: u64, - ) -> Option { - // N.B. While the bucket count is known from self.buckets.len(), - // requiring it as a parameter makes it easier for the optimizer to - // know its value, and thus produce more efficient codegen. - debug_assert!(bucket_count == 8 || bucket_count == 16); - while cand != 0 { - let bit = cand.trailing_zeros() as usize; - cand &= !(1 << bit); - - let at = at + (bit / bucket_count); - let bucket = bit % bucket_count; - if let Some(m) = self.verify_bucket(pats, haystack, bucket, at) { - return Some(m); - } - } - None - } - - /// Verify whether there are any matches starting at `at` in the given - /// `haystack` corresponding only to patterns in the given bucket. - #[inline(always)] - fn verify_bucket( - &self, - pats: &Patterns, - haystack: &[u8], - bucket: usize, - at: usize, - ) -> Option { - // Forcing this function to not inline and be "cold" seems to help - // the codegen for Teddy overall. 
Interestingly, this is good for a - // 16% boost in the sherlock/packed/teddy/name/alt1 benchmark (among - // others). Overall, this seems like a problem with codegen, since - // creating the Match itself is a very small amount of code. - #[cold] - #[inline(never)] - fn match_from_span( - pati: PatternID, - start: usize, - end: usize, - ) -> Match { - Match::from_span(pati as usize, start, end) - } - - // N.B. The bounds check for this bucket lookup *should* be elided - // since we assert the number of buckets in each `find_at` routine, - // and the compiler can prove that the `% 8` (or `% 16`) in callers - // of this routine will always be in bounds. - for &pati in &self.buckets[bucket] { - // SAFETY: This is safe because we are guaranteed that every - // index in a Teddy bucket is a valid index into `pats`. This - // guarantee is upheld by the assert checking `max_pattern_id` in - // the beginning of `find_at` above. - // - // This explicit bounds check elision is (amazingly) good for a - // 25-50% boost in some benchmarks, particularly ones with a lot - // of short literals. - let pat = unsafe { pats.get_unchecked(pati) }; - if pat.is_prefix(&haystack[at..]) { - return Some(match_from_span(pati, at, at + pat.len())); - } - } - None - } -} - -/// Exec represents the different search strategies supported by the Teddy -/// runtime. -/// -/// This enum is an important safety abstraction. Namely, callers should only -/// construct a variant in this enum if it is safe to execute its corresponding -/// target features on the current CPU. The 128-bit searchers require SSSE3, -/// while the 256-bit searchers require AVX2. 
-#[derive(Clone, Debug)] -pub enum Exec { - TeddySlim1Mask128(TeddySlim1Mask128), - TeddySlim1Mask256(TeddySlim1Mask256), - TeddyFat1Mask256(TeddyFat1Mask256), - TeddySlim2Mask128(TeddySlim2Mask128), - TeddySlim2Mask256(TeddySlim2Mask256), - TeddyFat2Mask256(TeddyFat2Mask256), - TeddySlim3Mask128(TeddySlim3Mask128), - TeddySlim3Mask256(TeddySlim3Mask256), - TeddyFat3Mask256(TeddyFat3Mask256), -} - -// Most of the code below remains undocumented because they are effectively -// repeated versions of themselves. The general structure is described in the -// README and in the comments above. - -#[derive(Clone, Debug)] -pub struct TeddySlim1Mask128 { - pub mask1: Mask128, -} - -impl TeddySlim1Mask128 { - #[target_feature(enable = "ssse3")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - let len = haystack.len(); - while at <= len - 16 { - let c = self.candidate(haystack, at); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at, c) { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - let c = self.candidate(haystack, at); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate(&self, haystack: &[u8], at: usize) -> __m128i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = loadu128(haystack, at); - members1m128(chunk, self.mask1) - } -} - -#[derive(Clone, Debug)] -pub struct TeddySlim1Mask256 { - pub mask1: Mask256, -} - -impl TeddySlim1Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - let len = haystack.len(); - while at <= len - 32 { - let c = self.candidate(haystack, at); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at, c) { - return Some(m); - } - } - at += 32; - } - if at < len { - at = len - 32; - let c = self.candidate(haystack, at); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate(&self, haystack: &[u8], at: usize) -> __m256i { - debug_assert!(haystack[at..].len() >= 32); - - let chunk = loadu256(haystack, at); - members1m256(chunk, self.mask1) - } -} - -#[derive(Clone, Debug)] -pub struct TeddyFat1Mask256 { - pub mask1: Mask256, -} - -impl TeddyFat1Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(16, teddy.buckets.len()); - - let len = haystack.len(); - while at <= len - 16 { - let c = self.candidate(haystack, at); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at, c) { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - let c = self.candidate(haystack, at); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate(&self, haystack: &[u8], at: usize) -> __m256i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = _mm256_broadcastsi128_si256(loadu128(haystack, at)); - members1m256(chunk, self.mask1) - } -} - -#[derive(Clone, Debug)] -pub struct TeddySlim2Mask128 { - pub mask1: Mask128, - pub mask2: Mask128, -} - -impl TeddySlim2Mask128 { - #[target_feature(enable = "ssse3")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - at += 1; - let len = haystack.len(); - let mut prev0 = ones128(); - while at <= len - 16 { - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at - 1, c) { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - prev0 = ones128(); - - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at - 1, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m128i, - ) -> __m128i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = loadu128(haystack, at); - let (res0, res1) = members2m128(chunk, self.mask1, self.mask2); - let res0prev0 = _mm_alignr_epi8(res0, *prev0, 15); - _mm_and_si128(res0prev0, res1) - } -} - -#[derive(Clone, Debug)] -pub struct TeddySlim2Mask256 { - pub mask1: Mask256, - pub mask2: Mask256, -} - -impl TeddySlim2Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - at += 1; - let len = haystack.len(); - let mut prev0 = ones256(); - while at <= len - 32 { - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at - 1, c) { - return Some(m); - } - } - at += 32; - } - if at < len { - at = len - 32; - prev0 = ones256(); - - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at - 1, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m256i, - ) -> __m256i { - debug_assert!(haystack[at..].len() >= 32); - - let chunk = loadu256(haystack, at); - let (res0, res1) = members2m256(chunk, self.mask1, self.mask2); - let res0prev0 = alignr256_15(res0, *prev0); - let res = _mm256_and_si256(res0prev0, res1); - *prev0 = res0; - res - } -} - -#[derive(Clone, Debug)] -pub struct TeddyFat2Mask256 { - pub mask1: Mask256, - pub mask2: Mask256, -} - -impl TeddyFat2Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(16, teddy.buckets.len()); - - at += 1; - let len = haystack.len(); - let mut prev0 = ones256(); - while at <= len - 16 { - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at - 1, c) - { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - prev0 = ones256(); - - let c = self.candidate(haystack, at, &mut prev0); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at - 1, c) - { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m256i, - ) -> __m256i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = _mm256_broadcastsi128_si256(loadu128(haystack, at)); - let (res0, res1) = members2m256(chunk, self.mask1, self.mask2); - let res0prev0 = _mm256_alignr_epi8(res0, *prev0, 15); - let res = _mm256_and_si256(res0prev0, res1); - *prev0 = res0; - res - } -} - -#[derive(Clone, Debug)] -pub struct TeddySlim3Mask128 { - pub mask1: Mask128, - pub mask2: Mask128, - pub mask3: Mask128, -} - -impl TeddySlim3Mask128 { - #[target_feature(enable = "ssse3")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - at += 2; - let len = haystack.len(); - let (mut prev0, mut prev1) = (ones128(), ones128()); - while at <= len - 16 { - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at - 2, c) { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - prev0 = ones128(); - prev1 = ones128(); - - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes128(c) { - if let Some(m) = teddy.verify128(pats, haystack, at - 2, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m128i, - prev1: &mut __m128i, - ) -> __m128i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = loadu128(haystack, at); - let (res0, res1, res2) = - members3m128(chunk, self.mask1, self.mask2, self.mask3); - let res0prev0 = _mm_alignr_epi8(res0, *prev0, 14); - let res1prev1 = _mm_alignr_epi8(res1, *prev1, 15); - let res = _mm_and_si128(_mm_and_si128(res0prev0, res1prev1), res2); - *prev0 = res0; - *prev1 = res1; - res - } -} - -#[derive(Clone, Debug)] -pub struct TeddySlim3Mask256 { - pub mask1: Mask256, - pub mask2: Mask256, - pub mask3: Mask256, -} - -impl TeddySlim3Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(8, teddy.buckets.len()); - - at += 2; - let len = haystack.len(); - let (mut prev0, mut prev1) = (ones256(), ones256()); - while at <= len - 32 { - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at - 2, c) { - return Some(m); - } - } - at += 32; - } - if at < len { - at = len - 32; - prev0 = ones256(); - prev1 = ones256(); - - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify256(pats, haystack, at - 2, c) { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m256i, - prev1: &mut __m256i, - ) -> __m256i { - debug_assert!(haystack[at..].len() >= 32); - - let chunk = loadu256(haystack, at); - let (res0, res1, res2) = - members3m256(chunk, self.mask1, self.mask2, self.mask3); - let res0prev0 = alignr256_14(res0, *prev0); - let res1prev1 = alignr256_15(res1, *prev1); - let res = - _mm256_and_si256(_mm256_and_si256(res0prev0, res1prev1), res2); - *prev0 = res0; - *prev1 = res1; - res - } -} - -#[derive(Clone, Debug)] -pub struct TeddyFat3Mask256 { - pub mask1: Mask256, - pub mask2: Mask256, - pub mask3: Mask256, -} - -impl TeddyFat3Mask256 { - #[target_feature(enable = "avx2")] - unsafe fn find_at( - &self, - pats: &Patterns, - teddy: &Teddy, - haystack: &[u8], - mut at: usize, - ) -> Option { - debug_assert!(haystack[at..].len() >= teddy.minimum_len()); - // This assert helps eliminate bounds checks for bucket lookups in - // Teddy::verify_bucket, which has a small (3-4%) performance boost. 
- assert_eq!(16, teddy.buckets.len()); - - at += 2; - let len = haystack.len(); - let (mut prev0, mut prev1) = (ones256(), ones256()); - while at <= len - 16 { - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at - 2, c) - { - return Some(m); - } - } - at += 16; - } - if at < len { - at = len - 16; - prev0 = ones256(); - prev1 = ones256(); - - let c = self.candidate(haystack, at, &mut prev0, &mut prev1); - if !is_all_zeroes256(c) { - if let Some(m) = teddy.verify_fat256(pats, haystack, at - 2, c) - { - return Some(m); - } - } - } - None - } - - #[inline(always)] - unsafe fn candidate( - &self, - haystack: &[u8], - at: usize, - prev0: &mut __m256i, - prev1: &mut __m256i, - ) -> __m256i { - debug_assert!(haystack[at..].len() >= 16); - - let chunk = _mm256_broadcastsi128_si256(loadu128(haystack, at)); - let (res0, res1, res2) = - members3m256(chunk, self.mask1, self.mask2, self.mask3); - let res0prev0 = _mm256_alignr_epi8(res0, *prev0, 14); - let res1prev1 = _mm256_alignr_epi8(res1, *prev1, 15); - let res = - _mm256_and_si256(_mm256_and_si256(res0prev0, res1prev1), res2); - *prev0 = res0; - *prev1 = res1; - res - } -} - -/// A 128-bit mask for the low and high nybbles in a set of patterns. Each -/// lane `j` corresponds to a bitset where the `i`th bit is set if and only if -/// the nybble `j` is in the bucket `i` at a particular position. -#[derive(Clone, Copy, Debug)] -pub struct Mask128 { - lo: __m128i, - hi: __m128i, -} - -impl Mask128 { - /// Create a new SIMD mask from the mask produced by the Teddy builder. - pub fn new(mask: compile::Mask) -> Mask128 { - // SAFETY: This is safe since [u8; 16] has the same representation - // as __m128i. - unsafe { - Mask128 { - lo: mem::transmute(mask.lo128()), - hi: mem::transmute(mask.hi128()), - } - } - } -} - -/// A 256-bit mask for the low and high nybbles in a set of patterns. 
Each -/// lane `j` corresponds to a bitset where the `i`th bit is set if and only if -/// the nybble `j` is in the bucket `i` at a particular position. -/// -/// This is slightly tweaked dependending on whether Slim or Fat Teddy is being -/// used. For Slim Teddy, the bitsets in the lower 128-bits are the same as -/// the bitsets in the higher 128-bits, so that we can search 32 bytes at a -/// time. (Remember, the nybbles in the haystack are used as indices into these -/// masks, and 256-bit shuffles only operate on 128-bit lanes.) -/// -/// For Fat Teddy, the bitsets are not repeated, but instead, the high 128 -/// bits correspond to buckets 8-15. So that a bitset `00100010` has buckets -/// 1 and 5 set if it's in the lower 128 bits, but has buckets 9 and 13 set -/// if it's in the higher 128 bits. -#[derive(Clone, Copy, Debug)] -pub struct Mask256 { - lo: __m256i, - hi: __m256i, -} - -impl Mask256 { - /// Create a new SIMD mask from the mask produced by the Teddy builder. - pub fn new(mask: compile::Mask) -> Mask256 { - // SAFETY: This is safe since [u8; 32] has the same representation - // as __m256i. - unsafe { - Mask256 { - lo: mem::transmute(mask.lo256()), - hi: mem::transmute(mask.hi256()), - } - } - } -} - -// The "members" routines below are responsible for taking a chunk of bytes, -// a number of nybble masks and returning the result of using the masks to -// lookup bytes in the chunk. The results of the high and low nybble masks are -// AND'ed together, such that each candidate returned is a vector, with byte -// sized lanes, and where each lane is an 8-bit bitset corresponding to the -// buckets that contain the corresponding byte. -// -// In the case of masks of length greater than 1, callers will need to keep -// the results from the previous haystack's window, and then shift the vectors -// so that they all line up. Then they can be AND'ed together. 
- -/// Return a candidate for Slim 128-bit Teddy, where `chunk` corresponds to a -/// 16-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and `mask1` corresponds to a -/// low/high mask for the first byte of all patterns that are being searched. -#[target_feature(enable = "ssse3")] -unsafe fn members1m128(chunk: __m128i, mask1: Mask128) -> __m128i { - let lomask = _mm_set1_epi8(0xF); - let hlo = _mm_and_si128(chunk, lomask); - let hhi = _mm_and_si128(_mm_srli_epi16(chunk, 4), lomask); - _mm_and_si128( - _mm_shuffle_epi8(mask1.lo, hlo), - _mm_shuffle_epi8(mask1.hi, hhi), - ) -} - -/// Return a candidate for Slim 256-bit Teddy, where `chunk` corresponds to a -/// 32-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and `mask1` corresponds to a -/// low/high mask for the first byte of all patterns that are being searched. -/// -/// Note that this can also be used for Fat Teddy, where the high 128 bits in -/// `chunk` is the same as the low 128 bits, which corresponds to a 16 byte -/// window in the haystack. -#[target_feature(enable = "avx2")] -unsafe fn members1m256(chunk: __m256i, mask1: Mask256) -> __m256i { - let lomask = _mm256_set1_epi8(0xF); - let hlo = _mm256_and_si256(chunk, lomask); - let hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask); - _mm256_and_si256( - _mm256_shuffle_epi8(mask1.lo, hlo), - _mm256_shuffle_epi8(mask1.hi, hhi), - ) -} - -/// Return candidates for Slim 128-bit Teddy, where `chunk` corresponds -/// to a 16-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and the masks correspond to a -/// low/high mask for the first and second bytes of all patterns that are being -/// searched. The vectors returned correspond to candidates for the first and -/// second bytes in the patterns represented by the masks. 
-#[target_feature(enable = "ssse3")] -unsafe fn members2m128( - chunk: __m128i, - mask1: Mask128, - mask2: Mask128, -) -> (__m128i, __m128i) { - let lomask = _mm_set1_epi8(0xF); - let hlo = _mm_and_si128(chunk, lomask); - let hhi = _mm_and_si128(_mm_srli_epi16(chunk, 4), lomask); - let res0 = _mm_and_si128( - _mm_shuffle_epi8(mask1.lo, hlo), - _mm_shuffle_epi8(mask1.hi, hhi), - ); - let res1 = _mm_and_si128( - _mm_shuffle_epi8(mask2.lo, hlo), - _mm_shuffle_epi8(mask2.hi, hhi), - ); - (res0, res1) -} - -/// Return candidates for Slim 256-bit Teddy, where `chunk` corresponds -/// to a 32-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and the masks correspond to a -/// low/high mask for the first and second bytes of all patterns that are being -/// searched. The vectors returned correspond to candidates for the first and -/// second bytes in the patterns represented by the masks. -/// -/// Note that this can also be used for Fat Teddy, where the high 128 bits in -/// `chunk` is the same as the low 128 bits, which corresponds to a 16 byte -/// window in the haystack. 
-#[target_feature(enable = "avx2")] -unsafe fn members2m256( - chunk: __m256i, - mask1: Mask256, - mask2: Mask256, -) -> (__m256i, __m256i) { - let lomask = _mm256_set1_epi8(0xF); - let hlo = _mm256_and_si256(chunk, lomask); - let hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask); - let res0 = _mm256_and_si256( - _mm256_shuffle_epi8(mask1.lo, hlo), - _mm256_shuffle_epi8(mask1.hi, hhi), - ); - let res1 = _mm256_and_si256( - _mm256_shuffle_epi8(mask2.lo, hlo), - _mm256_shuffle_epi8(mask2.hi, hhi), - ); - (res0, res1) -} - -/// Return candidates for Slim 128-bit Teddy, where `chunk` corresponds -/// to a 16-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and the masks correspond to a -/// low/high mask for the first, second and third bytes of all patterns that -/// are being searched. The vectors returned correspond to candidates for the -/// first, second and third bytes in the patterns represented by the masks. -#[target_feature(enable = "ssse3")] -unsafe fn members3m128( - chunk: __m128i, - mask1: Mask128, - mask2: Mask128, - mask3: Mask128, -) -> (__m128i, __m128i, __m128i) { - let lomask = _mm_set1_epi8(0xF); - let hlo = _mm_and_si128(chunk, lomask); - let hhi = _mm_and_si128(_mm_srli_epi16(chunk, 4), lomask); - let res0 = _mm_and_si128( - _mm_shuffle_epi8(mask1.lo, hlo), - _mm_shuffle_epi8(mask1.hi, hhi), - ); - let res1 = _mm_and_si128( - _mm_shuffle_epi8(mask2.lo, hlo), - _mm_shuffle_epi8(mask2.hi, hhi), - ); - let res2 = _mm_and_si128( - _mm_shuffle_epi8(mask3.lo, hlo), - _mm_shuffle_epi8(mask3.hi, hhi), - ); - (res0, res1, res2) -} - -/// Return candidates for Slim 256-bit Teddy, where `chunk` corresponds -/// to a 32-byte window of the haystack (where the least significant byte -/// corresponds to the start of the window), and the masks correspond to a -/// low/high mask for the first, second and third bytes of all patterns that -/// are being searched. 
The vectors returned correspond to candidates for the -/// first, second and third bytes in the patterns represented by the masks. -/// -/// Note that this can also be used for Fat Teddy, where the high 128 bits in -/// `chunk` is the same as the low 128 bits, which corresponds to a 16 byte -/// window in the haystack. -#[target_feature(enable = "avx2")] -unsafe fn members3m256( - chunk: __m256i, - mask1: Mask256, - mask2: Mask256, - mask3: Mask256, -) -> (__m256i, __m256i, __m256i) { - let lomask = _mm256_set1_epi8(0xF); - let hlo = _mm256_and_si256(chunk, lomask); - let hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask); - let res0 = _mm256_and_si256( - _mm256_shuffle_epi8(mask1.lo, hlo), - _mm256_shuffle_epi8(mask1.hi, hhi), - ); - let res1 = _mm256_and_si256( - _mm256_shuffle_epi8(mask2.lo, hlo), - _mm256_shuffle_epi8(mask2.hi, hhi), - ); - let res2 = _mm256_and_si256( - _mm256_shuffle_epi8(mask3.lo, hlo), - _mm256_shuffle_epi8(mask3.hi, hhi), - ); - (res0, res1, res2) -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/tests.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/tests.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/tests.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/tests.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,568 +0,0 @@ -use std::collections::HashMap; -use std::usize; - -use crate::packed::{Config, MatchKind}; -use crate::Match; - -/// A description of a single test against a multi-pattern searcher. -/// -/// A single test may not necessarily pass on every configuration of a -/// searcher. The tests are categorized and grouped appropriately below. -#[derive(Clone, Debug, Eq, PartialEq)] -struct SearchTest { - /// The name of this test, for debugging. - name: &'static str, - /// The patterns to search for. - patterns: &'static [&'static str], - /// The text to search. 
- haystack: &'static str, - /// Each match is a triple of (pattern_index, start, end), where - /// pattern_index is an index into `patterns` and `start`/`end` are indices - /// into `haystack`. - matches: &'static [(usize, usize, usize)], -} - -struct SearchTestOwned { - offset: usize, - name: String, - patterns: Vec, - haystack: String, - matches: Vec<(usize, usize, usize)>, -} - -impl SearchTest { - fn variations(&self) -> Vec { - let mut tests = vec![]; - for i in 0..=260 { - tests.push(self.offset_prefix(i)); - tests.push(self.offset_suffix(i)); - tests.push(self.offset_both(i)); - } - tests - } - - fn offset_both(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!( - "{}{}{}", - "Z".repeat(off), - self.haystack, - "Z".repeat(off) - ), - matches: self - .matches - .iter() - .map(|&(id, s, e)| (id, s + off, e + off)) - .collect(), - } - } - - fn offset_prefix(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!("{}{}", "Z".repeat(off), self.haystack), - matches: self - .matches - .iter() - .map(|&(id, s, e)| (id, s + off, e + off)) - .collect(), - } - } - - fn offset_suffix(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!("{}{}", self.haystack, "Z".repeat(off)), - matches: self.matches.to_vec(), - } - } - - // fn to_owned(&self) -> SearchTestOwned { - // SearchTestOwned { - // name: self.name.to_string(), - // patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - // haystack: self.haystack.to_string(), - // matches: self.matches.iter().cloned().collect(), - // } - // } -} - -/// Short-hand constructor for SearchTest. 
We use it a lot below. -macro_rules! t { - ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { - SearchTest { - name: stringify!($name), - patterns: $patterns, - haystack: $haystack, - matches: $matches, - } - }; -} - -/// A collection of test groups. -type TestCollection = &'static [&'static [SearchTest]]; - -// Define several collections corresponding to the different type of match -// semantics supported. These collections have some overlap, but each -// collection should have some tests that no other collection has. - -/// Tests for leftmost-first match semantics. -const PACKED_LEFTMOST_FIRST: TestCollection = - &[BASICS, LEFTMOST, LEFTMOST_FIRST, REGRESSION, TEDDY]; - -/// Tests for leftmost-longest match semantics. -const PACKED_LEFTMOST_LONGEST: TestCollection = - &[BASICS, LEFTMOST, LEFTMOST_LONGEST, REGRESSION, TEDDY]; - -// Now define the individual tests that make up the collections above. - -/// A collection of tests for the that should always be true regardless of -/// match semantics. That is, all combinations of leftmost-{first, longest} -/// should produce the same answer. 
-const BASICS: &'static [SearchTest] = &[ - t!(basic001, &["a"], "", &[]), - t!(basic010, &["a"], "a", &[(0, 0, 1)]), - t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), - t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), - t!(basic050, &["a"], "bba", &[(0, 2, 3)]), - t!(basic060, &["a"], "bbb", &[]), - t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), - t!(basic100, &["aa"], "", &[]), - t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), - t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), - t!(basic130, &["aa"], "abbab", &[]), - t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), - t!(basic150, &["aaa"], "aaa", &[(0, 0, 3)]), - t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), - t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), - t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), - t!(basic300, &["a", "b"], "", &[]), - t!(basic310, &["a", "b"], "z", &[]), - t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), - t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), - t!( - basic340, - &["a", "b"], - "abba", - &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] - ), - t!( - basic350, - &["b", "a"], - "abba", - &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] - ), - t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), - t!(basic400, &["foo", "bar"], "", &[]), - t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), - t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), - t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), - t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), - t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), - t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), - t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), - t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), - t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), - t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", 
&[(1, 1, 10),]), - t!( - basic720, - &["yabcdef", "bcdeyabc", "abcdezghi"], - "yabcdezghi", - &[(2, 1, 10),] - ), - t!(basic810, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(basic820, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(basic830, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), - t!( - basic840, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] - ), - t!(basic850, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), -]; - -/// Tests for leftmost match semantics. These should pass for both -/// leftmost-first and leftmost-longest match kinds. Stated differently, among -/// ambiguous matches, the longest match and the match that appeared first when -/// constructing the automaton should always be the same. -const LEFTMOST: &'static [SearchTest] = &[ - t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), - t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), - t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), - t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - leftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - 
"abcdefghz", - &[(3, 0, 8), (0, 8, 9),] - ), -]; - -/// Tests for non-overlapping leftmost-first match semantics. These tests -/// should generally be specific to leftmost-first, which means they should -/// generally fail under leftmost-longest semantics. -const LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), - t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), - t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), - t!( - leftfirst310, - &["abcd", "b", "bce", "ce"], - "abce", - &[(1, 1, 2), (3, 2, 4),] - ), - t!( - leftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1), (2, 7, 9),] - ), - t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), - t!( - leftfirst340, - &["abcdef", "x", "x", "x", "x", "x", "x", "abcde"], - "abcdef", - &[(0, 0, 6)] - ), -]; - -/// Tests for non-overlapping leftmost-longest match semantics. These tests -/// should generally be specific to leftmost-longest, which means they should -/// generally fail under leftmost-first semantics. 
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), - t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), - t!( - leftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), - t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), -]; - -/// Regression tests that are applied to all combinations. -/// -/// If regression tests are needed for specific match semantics, then add them -/// to the appropriate group above. 
-const REGRESSION: &'static [SearchTest] = &[ - t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), - t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), - t!( - regression030, - &["libcore/", "libstd/"], - "libcore/char/methods.rs", - &[(0, 0, 8),] - ), - t!( - regression040, - &["libstd/", "libcore/"], - "libcore/char/methods.rs", - &[(1, 0, 8),] - ), - t!( - regression050, - &["\x00\x00\x01", "\x00\x00\x00"], - "\x00\x00\x00", - &[(1, 0, 3),] - ), - t!( - regression060, - &["\x00\x00\x00", "\x00\x00\x01"], - "\x00\x00\x00", - &[(0, 0, 3),] - ), -]; - -const TEDDY: &'static [SearchTest] = &[ - t!( - teddy010, - &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"], - "abcdefghijk", - &[ - (0, 0, 1), - (1, 1, 2), - (2, 2, 3), - (3, 3, 4), - (4, 4, 5), - (5, 5, 6), - (6, 6, 7), - (7, 7, 8), - (8, 8, 9), - (9, 9, 10), - (10, 10, 11) - ] - ), - t!( - teddy020, - &["ab", "bc", "cd", "de", "ef", "fg", "gh", "hi", "ij", "jk", "kl"], - "abcdefghijk", - &[(0, 0, 2), (2, 2, 4), (4, 4, 6), (6, 6, 8), (8, 8, 10),] - ), - t!( - teddy030, - &["abc"], - "abcdefghijklmnopqrstuvwxyzabcdefghijk", - &[(0, 0, 3), (0, 26, 29)] - ), -]; - -// Now define a test for each combination of things above that we want to run. -// Since there are a few different combinations for each collection of tests, -// we define a couple of macros to avoid repetition drudgery. The testconfig -// macro constructs the automaton from a given match kind, and runs the search -// tests one-by-one over the given collection. The `with` parameter allows one -// to configure the config with additional parameters. The testcombo macro -// invokes testconfig in precisely this way: it sets up several tests where -// each one turns a different knob on Config. - -macro_rules! 
testconfig { - ($name:ident, $collection:expr, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut config = Config::new(); - $with(&mut config); - config - .builder() - .extend(test.patterns.iter().map(|p| p.as_bytes())) - .build() - .unwrap() - .find_iter(&test.haystack) - .collect() - }); - } - }; -} - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_default_leftmost_first, - PACKED_LEFTMOST_FIRST, - |_: &mut Config| {} -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_default_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.match_kind(MatchKind::LeftmostLongest); - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.force_teddy(true); - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.force_teddy(true).match_kind(MatchKind::LeftmostLongest); - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_ssse3_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.force_teddy(true); - if is_x86_feature_detected!("ssse3") { - c.force_avx(Some(false)); - } - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_ssse3_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.force_teddy(true).match_kind(MatchKind::LeftmostLongest); - if is_x86_feature_detected!("ssse3") { - c.force_avx(Some(false)); - } - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_avx2_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.force_teddy(true); - if is_x86_feature_detected!("avx2") { - c.force_avx(Some(true)); - } - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_avx2_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.force_teddy(true).match_kind(MatchKind::LeftmostLongest); - if is_x86_feature_detected!("avx2") { - 
c.force_avx(Some(true)); - } - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_fat_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.force_teddy(true); - if is_x86_feature_detected!("avx2") { - c.force_teddy_fat(Some(true)); - } - } -); - -#[cfg(target_arch = "x86_64")] -testconfig!( - search_teddy_fat_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.force_teddy(true).match_kind(MatchKind::LeftmostLongest); - if is_x86_feature_detected!("avx2") { - c.force_teddy_fat(Some(true)); - } - } -); - -testconfig!( - search_rabinkarp_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.force_rabin_karp(true); - } -); - -testconfig!( - search_rabinkarp_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.force_rabin_karp(true).match_kind(MatchKind::LeftmostLongest); - } -); - -#[test] -fn search_tests_have_unique_names() { - let assert = |constname, tests: &[SearchTest]| { - let mut seen = HashMap::new(); // map from test name to position - for (i, test) in tests.iter().enumerate() { - if !seen.contains_key(test.name) { - seen.insert(test.name, i); - } else { - let last = seen[test.name]; - panic!( - "{} tests have duplicate names at positions {} and {}", - constname, last, i - ); - } - } - }; - assert("BASICS", BASICS); - assert("LEFTMOST", LEFTMOST); - assert("LEFTMOST_FIRST", LEFTMOST_FIRST); - assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); - assert("REGRESSION", REGRESSION); - assert("TEDDY", TEDDY); -} - -fn run_search_tests Vec>( - which: TestCollection, - mut f: F, -) { - let get_match_triples = - |matches: Vec| -> Vec<(usize, usize, usize)> { - matches - .into_iter() - .map(|m| (m.pattern(), m.start(), m.end())) - .collect() - }; - for &tests in which { - for spec in tests { - for test in spec.variations() { - assert_eq!( - test.matches, - get_match_triples(f(&test)).as_slice(), - "test: {}, patterns: {:?}, haystack: {:?}, offset: {:?}", - test.name, - test.patterns, - 
test.haystack, - test.offset, - ); - } - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/vector.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/vector.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/vector.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/packed/vector.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -// This file contains a set of fairly generic utility functions when working -// with SIMD vectors. -// -// SAFETY: All of the routines below are unsafe to call because they assume -// the necessary CPU target features in order to use particular vendor -// intrinsics. Calling these routines when the underlying CPU does not support -// the appropriate target features is NOT safe. Callers must ensure this -// themselves. -// -// Note that it may not look like this safety invariant is being upheld when -// these routines are called. Namely, the CPU feature check is typically pretty -// far away from when these routines are used. Instead, we rely on the fact -// that certain types serve as a guaranteed receipt that pertinent target -// features are enabled. For example, the only way TeddySlim3Mask256 can be -// constructed is if the AVX2 CPU feature is available. Thus, any code running -// inside of TeddySlim3Mask256 can use any of the functions below without any -// additional checks: its very existence *is* the check. - -use std::arch::x86_64::*; - -/// Shift `a` to the left by two bytes (removing its two most significant -/// bytes), and concatenate it with the the two most significant bytes of `b`. 
-#[target_feature(enable = "avx2")] -pub unsafe fn alignr256_14(a: __m256i, b: __m256i) -> __m256i { - // Credit goes to jneem for figuring this out: - // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 - // - // TL;DR avx2's PALIGNR instruction is actually just two 128-bit PALIGNR - // instructions, which is not what we want, so we need to do some extra - // shuffling. - - // This permute gives us the low 16 bytes of a concatenated with the high - // 16 bytes of b, in order of most significant to least significant. So - // `v = a[15:0] b[31:16]`. - let v = _mm256_permute2x128_si256(b, a, 0x21); - // This effectively does this (where we deal in terms of byte-indexing - // and byte-shifting, and use inclusive ranges): - // - // ret[15:0] := ((a[15:0] << 16) | v[15:0]) >> 14 - // = ((a[15:0] << 16) | b[31:16]) >> 14 - // ret[31:16] := ((a[31:16] << 16) | v[31:16]) >> 14 - // = ((a[31:16] << 16) | a[15:0]) >> 14 - // - // Which therefore results in: - // - // ret[31:0] := a[29:16] a[15:14] a[13:0] b[31:30] - // - // The end result is that we've effectively done this: - // - // (a << 2) | (b >> 30) - // - // When `A` and `B` are strings---where the beginning of the string is in - // the least significant bits---we effectively result in the following - // semantic operation: - // - // (A >> 2) | (B << 30) - // - // The reversal being attributed to the fact that we are in little-endian. - _mm256_alignr_epi8(a, v, 14) -} - -/// Shift `a` to the left by one byte (removing its most significant byte), and -/// concatenate it with the the most significant byte of `b`. -#[target_feature(enable = "avx2")] -pub unsafe fn alignr256_15(a: __m256i, b: __m256i) -> __m256i { - // For explanation, see alignr256_14. - let v = _mm256_permute2x128_si256(b, a, 0x21); - _mm256_alignr_epi8(a, v, 15) -} - -/// Unpack the given 128-bit vector into its 64-bit components. 
The first -/// element of the array returned corresponds to the least significant 64-bit -/// lane in `a`. -#[target_feature(enable = "ssse3")] -pub unsafe fn unpack64x128(a: __m128i) -> [u64; 2] { - [ - _mm_cvtsi128_si64(a) as u64, - _mm_cvtsi128_si64(_mm_srli_si128(a, 8)) as u64, - ] -} - -/// Unpack the given 256-bit vector into its 64-bit components. The first -/// element of the array returned corresponds to the least significant 64-bit -/// lane in `a`. -#[target_feature(enable = "avx2")] -pub unsafe fn unpack64x256(a: __m256i) -> [u64; 4] { - // Using transmute here is precisely equivalent, but actually slower. It's - // not quite clear why. - let lo = _mm256_extracti128_si256(a, 0); - let hi = _mm256_extracti128_si256(a, 1); - [ - _mm_cvtsi128_si64(lo) as u64, - _mm_cvtsi128_si64(_mm_srli_si128(lo, 8)) as u64, - _mm_cvtsi128_si64(hi) as u64, - _mm_cvtsi128_si64(_mm_srli_si128(hi, 8)) as u64, - ] -} - -/// Unpack the low 128-bits of `a` and `b`, and return them as 4 64-bit -/// integers. -/// -/// More precisely, if a = a4 a3 a2 a1 and b = b4 b3 b2 b1, where each element -/// is a 64-bit integer and a1/b1 correspond to the least significant 64 bits, -/// then the return value is `b2 b1 a2 a1`. -#[target_feature(enable = "avx2")] -pub unsafe fn unpacklo64x256(a: __m256i, b: __m256i) -> [u64; 4] { - let lo = _mm256_castsi256_si128(a); - let hi = _mm256_castsi256_si128(b); - [ - _mm_cvtsi128_si64(lo) as u64, - _mm_cvtsi128_si64(_mm_srli_si128(lo, 8)) as u64, - _mm_cvtsi128_si64(hi) as u64, - _mm_cvtsi128_si64(_mm_srli_si128(hi, 8)) as u64, - ] -} - -/// Returns true if and only if all bits in the given 128-bit vector are 0. -#[target_feature(enable = "ssse3")] -pub unsafe fn is_all_zeroes128(a: __m128i) -> bool { - let cmp = _mm_cmpeq_epi8(a, zeroes128()); - _mm_movemask_epi8(cmp) as u32 == 0xFFFF -} - -/// Returns true if and only if all bits in the given 256-bit vector are 0. 
-#[target_feature(enable = "avx2")] -pub unsafe fn is_all_zeroes256(a: __m256i) -> bool { - let cmp = _mm256_cmpeq_epi8(a, zeroes256()); - _mm256_movemask_epi8(cmp) as u32 == 0xFFFFFFFF -} - -/// Load a 128-bit vector from slice at the given position. The slice does -/// not need to be unaligned. -/// -/// Since this code assumes little-endian (there is no big-endian x86), the -/// bytes starting in `slice[at..]` will be at the least significant bits of -/// the returned vector. This is important for the surrounding code, since for -/// example, shifting the resulting vector right is equivalent to logically -/// shifting the bytes in `slice` left. -#[target_feature(enable = "sse2")] -pub unsafe fn loadu128(slice: &[u8], at: usize) -> __m128i { - let ptr = slice.get_unchecked(at..).as_ptr(); - _mm_loadu_si128(ptr as *const u8 as *const __m128i) -} - -/// Load a 256-bit vector from slice at the given position. The slice does -/// not need to be unaligned. -/// -/// Since this code assumes little-endian (there is no big-endian x86), the -/// bytes starting in `slice[at..]` will be at the least significant bits of -/// the returned vector. This is important for the surrounding code, since for -/// example, shifting the resulting vector right is equivalent to logically -/// shifting the bytes in `slice` left. -#[target_feature(enable = "avx2")] -pub unsafe fn loadu256(slice: &[u8], at: usize) -> __m256i { - let ptr = slice.get_unchecked(at..).as_ptr(); - _mm256_loadu_si256(ptr as *const u8 as *const __m256i) -} - -/// Returns a 128-bit vector with all bits set to 0. -#[target_feature(enable = "sse2")] -pub unsafe fn zeroes128() -> __m128i { - _mm_set1_epi8(0) -} - -/// Returns a 256-bit vector with all bits set to 0. -#[target_feature(enable = "avx2")] -pub unsafe fn zeroes256() -> __m256i { - _mm256_set1_epi8(0) -} - -/// Returns a 128-bit vector with all bits set to 1. 
-#[target_feature(enable = "sse2")] -pub unsafe fn ones128() -> __m128i { - _mm_set1_epi8(0xFF as u8 as i8) -} - -/// Returns a 256-bit vector with all bits set to 1. -#[target_feature(enable = "avx2")] -pub unsafe fn ones256() -> __m256i { - _mm256_set1_epi8(0xFF as u8 as i8) -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/prefilter.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/prefilter.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/prefilter.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/prefilter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1057 +0,0 @@ -use std::cmp; -use std::fmt; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::u8; - -use memchr::{memchr, memchr2, memchr3}; - -use crate::ahocorasick::MatchKind; -use crate::packed; -use crate::Match; - -/// A candidate is the result of running a prefilter on a haystack at a -/// particular position. The result is either no match, a confirmed match or -/// a possible match. -/// -/// When no match is returned, the prefilter is guaranteeing that no possible -/// match can be found in the haystack, and the caller may trust this. That is, -/// all correct prefilters must never report false negatives. -/// -/// In some cases, a prefilter can confirm a match very quickly, in which case, -/// the caller may use this to stop what it's doing and report the match. In -/// this case, prefilter implementations must never report a false positive. -/// In other cases, the prefilter can only report a potential match, in which -/// case the callers must attempt to confirm the match. In this case, prefilter -/// implementations are permitted to return false positives. -#[derive(Clone, Debug)] -pub enum Candidate { - None, - Match(Match), - PossibleStartOfMatch(usize), -} - -impl Candidate { - /// Convert this candidate into an option. 
This is useful when callers - /// do not distinguish between true positives and false positives (i.e., - /// the caller must always confirm the match in order to update some other - /// state). - pub fn into_option(self) -> Option { - match self { - Candidate::None => None, - Candidate::Match(ref m) => Some(m.start()), - Candidate::PossibleStartOfMatch(start) => Some(start), - } - } -} - -/// A prefilter describes the behavior of fast literal scanners for quickly -/// skipping past bytes in the haystack that we know cannot possibly -/// participate in a match. -pub trait Prefilter: - Send + Sync + RefUnwindSafe + UnwindSafe + fmt::Debug -{ - /// Returns the next possible match candidate. This may yield false - /// positives, so callers must confirm a match starting at the position - /// returned. This, however, must never produce false negatives. That is, - /// this must, at minimum, return the starting position of the next match - /// in the given haystack after or at the given position. - fn next_candidate( - &self, - state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate; - - /// A method for cloning a prefilter, to work-around the fact that Clone - /// is not object-safe. - fn clone_prefilter(&self) -> Box; - - /// Returns the approximate total amount of heap used by this prefilter, in - /// units of bytes. - fn heap_bytes(&self) -> usize; - - /// Returns true if and only if this prefilter never returns false - /// positives. This is useful for completely avoiding the automaton - /// when the prefilter can quickly confirm its own matches. - /// - /// By default, this returns true, which is conservative; it is always - /// correct to return `true`. Returning `false` here and reporting a false - /// positive will result in incorrect searches. - fn reports_false_positives(&self) -> bool { - true - } - - /// Returns true if and only if this prefilter may look for a non-starting - /// position of a match. 
- /// - /// This is useful in a streaming context where prefilters that don't look - /// for a starting position of a match can be quite difficult to deal with. - /// - /// This returns false by default. - fn looks_for_non_start_of_match(&self) -> bool { - false - } -} - -impl<'a, P: Prefilter + ?Sized> Prefilter for &'a P { - #[inline] - fn next_candidate( - &self, - state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - (**self).next_candidate(state, haystack, at) - } - - fn clone_prefilter(&self) -> Box { - (**self).clone_prefilter() - } - - fn heap_bytes(&self) -> usize { - (**self).heap_bytes() - } - - fn reports_false_positives(&self) -> bool { - (**self).reports_false_positives() - } -} - -/// A convenience object for representing any type that implements Prefilter -/// and is cloneable. -#[derive(Debug)] -pub struct PrefilterObj(Box); - -impl Clone for PrefilterObj { - fn clone(&self) -> Self { - PrefilterObj(self.0.clone_prefilter()) - } -} - -impl PrefilterObj { - /// Create a new prefilter object. - pub fn new(t: T) -> PrefilterObj { - PrefilterObj(Box::new(t)) - } - - /// Return the underlying prefilter trait object. - pub fn as_ref(&self) -> &dyn Prefilter { - &*self.0 - } -} - -/// PrefilterState tracks state associated with the effectiveness of a -/// prefilter. It is used to track how many bytes, on average, are skipped by -/// the prefilter. If this average dips below a certain threshold over time, -/// then the state renders the prefilter inert and stops using it. -/// -/// A prefilter state should be created for each search. (Where creating an -/// iterator via, e.g., `find_iter`, is treated as a single search.) -#[derive(Clone, Debug)] -pub struct PrefilterState { - /// The number of skips that has been executed. - skips: usize, - /// The total number of bytes that have been skipped. - skipped: usize, - /// The maximum length of a match. 
This is used to help determine how many - /// bytes on average should be skipped in order for a prefilter to be - /// effective. - max_match_len: usize, - /// Once this heuristic has been deemed permanently ineffective, it will be - /// inert throughout the rest of its lifetime. This serves as a cheap way - /// to check inertness. - inert: bool, - /// The last (absolute) position at which a prefilter scanned to. - /// Prefilters can use this position to determine whether to re-scan or - /// not. - /// - /// Unlike other things that impact effectiveness, this is a fleeting - /// condition. That is, a prefilter can be considered ineffective if it is - /// at a position before `last_scan_at`, but can become effective again - /// once the search moves past `last_scan_at`. - /// - /// The utility of this is to both avoid additional overhead from calling - /// the prefilter and to avoid quadratic behavior. This ensures that a - /// prefilter will scan any particular byte at most once. (Note that some - /// prefilters, like the start-byte prefilter, do not need to use this - /// field at all, since it only looks for starting bytes.) - last_scan_at: usize, -} - -impl PrefilterState { - /// The minimum number of skip attempts to try before considering whether - /// a prefilter is effective or not. - const MIN_SKIPS: usize = 40; - - /// The minimum amount of bytes that skipping must average, expressed as a - /// factor of the multiple of the length of a possible match. - /// - /// That is, after MIN_SKIPS have occurred, if the average number of bytes - /// skipped ever falls below MIN_AVG_FACTOR * max-match-length, then the - /// prefilter outed to be rendered inert. - const MIN_AVG_FACTOR: usize = 2; - - /// Create a fresh prefilter state. - pub fn new(max_match_len: usize) -> PrefilterState { - PrefilterState { - skips: 0, - skipped: 0, - max_match_len, - inert: false, - last_scan_at: 0, - } - } - - /// Create a prefilter state that always disables the prefilter. 
- pub fn disabled() -> PrefilterState { - PrefilterState { - skips: 0, - skipped: 0, - max_match_len: 0, - inert: true, - last_scan_at: 0, - } - } - - /// Update this state with the number of bytes skipped on the last - /// invocation of the prefilter. - #[inline] - fn update_skipped_bytes(&mut self, skipped: usize) { - self.skips += 1; - self.skipped += skipped; - } - - /// Updates the position at which the last scan stopped. This may be - /// greater than the position of the last candidate reported. For example, - /// searching for the "rare" byte `z` in `abczdef` for the pattern `abcz` - /// will report a candidate at position `0`, but the end of its last scan - /// will be at position `3`. - /// - /// This position factors into the effectiveness of this prefilter. If the - /// current position is less than the last position at which a scan ended, - /// then the prefilter should not be re-run until the search moves past - /// that position. - #[inline] - fn update_at(&mut self, at: usize) { - if at > self.last_scan_at { - self.last_scan_at = at; - } - } - - /// Return true if and only if this state indicates that a prefilter is - /// still effective. - /// - /// The given pos should correspond to the current starting position of the - /// search. - #[inline] - pub fn is_effective(&mut self, at: usize) -> bool { - if self.inert { - return false; - } - if at < self.last_scan_at { - return false; - } - if self.skips < PrefilterState::MIN_SKIPS { - return true; - } - - let min_avg = PrefilterState::MIN_AVG_FACTOR * self.max_match_len; - if self.skipped >= min_avg * self.skips { - return true; - } - - // We're inert. - self.inert = true; - false - } -} - -/// A builder for constructing the best possible prefilter. When constructed, -/// this builder will heuristically select the best prefilter it can build, -/// if any, and discard the rest. 
-#[derive(Debug)] -pub struct Builder { - count: usize, - ascii_case_insensitive: bool, - start_bytes: StartBytesBuilder, - rare_bytes: RareBytesBuilder, - packed: Option, -} - -impl Builder { - /// Create a new builder for constructing the best possible prefilter. - pub fn new(kind: MatchKind) -> Builder { - let pbuilder = kind - .as_packed() - .map(|kind| packed::Config::new().match_kind(kind).builder()); - Builder { - count: 0, - ascii_case_insensitive: false, - start_bytes: StartBytesBuilder::new(), - rare_bytes: RareBytesBuilder::new(), - packed: pbuilder, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - pub fn ascii_case_insensitive(mut self, yes: bool) -> Builder { - self.ascii_case_insensitive = yes; - self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); - self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); - self - } - - /// Return a prefilter suitable for quickly finding potential matches. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. - pub fn build(&self) -> Option { - // match (self.start_bytes.build(), self.rare_bytes.build()) { - match (self.start_bytes.build(), self.rare_bytes.build()) { - // If we could build both start and rare prefilters, then there are - // a few cases in which we'd want to use the start-byte prefilter - // over the rare-byte prefilter, since the former has lower - // overhead. - (prestart @ Some(_), prerare @ Some(_)) => { - // If the start-byte prefilter can scan for a smaller number - // of bytes than the rare-byte prefilter, then it's probably - // faster. 
- let has_fewer_bytes = - self.start_bytes.count < self.rare_bytes.count; - // Otherwise, if the combined frequency rank of the detected - // bytes in the start-byte prefilter is "close" to the combined - // frequency rank of the rare-byte prefilter, then we pick - // the start-byte prefilter even if the rare-byte prefilter - // heuristically searches for rare bytes. This is because the - // rare-byte prefilter has higher constant costs, so we tend to - // prefer the start-byte prefilter when we can. - let has_rarer_bytes = - self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; - if has_fewer_bytes || has_rarer_bytes { - prestart - } else { - prerare - } - } - (prestart @ Some(_), None) => prestart, - (None, prerare @ Some(_)) => prerare, - (None, None) if self.ascii_case_insensitive => None, - (None, None) => self - .packed - .as_ref() - .and_then(|b| b.build()) - .map(|s| PrefilterObj::new(Packed(s))), - } - } - - /// Add a literal string to this prefilter builder. - pub fn add(&mut self, bytes: &[u8]) { - self.count += 1; - self.start_bytes.add(bytes); - self.rare_bytes.add(bytes); - if let Some(ref mut pbuilder) = self.packed { - pbuilder.add(bytes); - } - } -} - -/// A type that wraps a packed searcher and implements the `Prefilter` -/// interface. -#[derive(Clone, Debug)] -struct Packed(packed::Searcher); - -impl Prefilter for Packed { - fn next_candidate( - &self, - _state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - self.0.find_at(haystack, at).map_or(Candidate::None, Candidate::Match) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - self.0.heap_bytes() - } - - fn reports_false_positives(&self) -> bool { - false - } -} - -/// A builder for constructing a rare byte prefilter. -/// -/// A rare byte prefilter attempts to pick out a small set of rare bytes that -/// occurr in the patterns, and then quickly scan to matches of those rare -/// bytes. 
-#[derive(Clone, Debug)] -struct RareBytesBuilder { - /// Whether this prefilter should account for ASCII case insensitivity or - /// not. - ascii_case_insensitive: bool, - /// A set of rare bytes, indexed by byte value. - rare_set: ByteSet, - /// A set of byte offsets associated with bytes in a pattern. An entry - /// corresponds to a particular bytes (its index) and is only non-zero if - /// the byte occurred at an offset greater than 0 in at least one pattern. - /// - /// If a byte's offset is not representable in 8 bits, then the rare bytes - /// prefilter becomes inert. - byte_offsets: RareByteOffsets, - /// Whether this is available as a prefilter or not. This can be set to - /// false during construction if a condition is seen that invalidates the - /// use of the rare-byte prefilter. - available: bool, - /// The number of bytes set to an active value in `byte_offsets`. - count: usize, - /// The sum of frequency ranks for the rare bytes detected. This is - /// intended to give a heuristic notion of how rare the bytes are. - rank_sum: u16, -} - -/// A set of bytes. -#[derive(Clone, Copy)] -struct ByteSet([bool; 256]); - -impl ByteSet { - fn empty() -> ByteSet { - ByteSet([false; 256]) - } - - fn insert(&mut self, b: u8) -> bool { - let new = !self.contains(b); - self.0[b as usize] = true; - new - } - - fn contains(&self, b: u8) -> bool { - self.0[b as usize] - } -} - -impl fmt::Debug for ByteSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut bytes = vec![]; - for b in 0..=255 { - if self.contains(b) { - bytes.push(b); - } - } - f.debug_struct("ByteSet").field("set", &bytes).finish() - } -} - -/// A set of byte offsets, keyed by byte. -#[derive(Clone, Copy)] -struct RareByteOffsets { - /// Each entry corresponds to the maximum offset of the corresponding - /// byte across all patterns seen. - set: [RareByteOffset; 256], -} - -impl RareByteOffsets { - /// Create a new empty set of rare byte offsets. 
- pub fn empty() -> RareByteOffsets { - RareByteOffsets { set: [RareByteOffset::default(); 256] } - } - - /// Add the given offset for the given byte to this set. If the offset is - /// greater than the existing offset, then it overwrites the previous - /// value and returns false. If there is no previous value set, then this - /// sets it and returns true. - pub fn set(&mut self, byte: u8, off: RareByteOffset) { - self.set[byte as usize].max = - cmp::max(self.set[byte as usize].max, off.max); - } -} - -impl fmt::Debug for RareByteOffsets { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut offsets = vec![]; - for off in self.set.iter() { - if off.max > 0 { - offsets.push(off); - } - } - f.debug_struct("RareByteOffsets").field("set", &offsets).finish() - } -} - -/// Offsets associated with an occurrence of a "rare" byte in any of the -/// patterns used to construct a single Aho-Corasick automaton. -#[derive(Clone, Copy, Debug)] -struct RareByteOffset { - /// The maximum offset at which a particular byte occurs from the start - /// of any pattern. This is used as a shift amount. That is, when an - /// occurrence of this byte is found, the candidate position reported by - /// the prefilter is `position_of_byte - max`, such that the automaton - /// will begin its search at a position that is guaranteed to observe a - /// match. - /// - /// To avoid accidentally quadratic behavior, a prefilter is considered - /// ineffective when it is asked to start scanning from a position that it - /// has already scanned past. - /// - /// Using a `u8` here means that if we ever see a pattern that's longer - /// than 255 bytes, then the entire rare byte prefilter is disabled. - max: u8, -} - -impl Default for RareByteOffset { - fn default() -> RareByteOffset { - RareByteOffset { max: 0 } - } -} - -impl RareByteOffset { - /// Create a new rare byte offset. If the given offset is too big, then - /// None is returned. 
In that case, callers should render the rare bytes - /// prefilter inert. - fn new(max: usize) -> Option { - if max > u8::MAX as usize { - None - } else { - Some(RareByteOffset { max: max as u8 }) - } - } -} - -impl RareBytesBuilder { - /// Create a new builder for constructing a rare byte prefilter. - fn new() -> RareBytesBuilder { - RareBytesBuilder { - ascii_case_insensitive: false, - rare_set: ByteSet::empty(), - byte_offsets: RareByteOffsets::empty(), - available: true, - count: 0, - rank_sum: 0, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { - self.ascii_case_insensitive = yes; - self - } - - /// Build the rare bytes prefilter. - /// - /// If there are more than 3 distinct starting bytes, or if heuristics - /// otherwise determine that this prefilter should not be used, then `None` - /// is returned. - fn build(&self) -> Option { - if !self.available || self.count > 3 { - return None; - } - let (mut bytes, mut len) = ([0; 3], 0); - for b in 0..=255 { - if self.rare_set.contains(b) { - bytes[len] = b as u8; - len += 1; - } - } - match len { - 0 => None, - 1 => Some(PrefilterObj::new(RareBytesOne { - byte1: bytes[0], - offset: self.byte_offsets.set[bytes[0] as usize], - })), - 2 => Some(PrefilterObj::new(RareBytesTwo { - offsets: self.byte_offsets, - byte1: bytes[0], - byte2: bytes[1], - })), - 3 => Some(PrefilterObj::new(RareBytesThree { - offsets: self.byte_offsets, - byte1: bytes[0], - byte2: bytes[1], - byte3: bytes[2], - })), - _ => unreachable!(), - } - } - - /// Add a byte string to this builder. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. - fn add(&mut self, bytes: &[u8]) { - // If we've already given up, then do nothing. 
- if !self.available { - return; - } - // If we've already blown our budget, then don't waste time looking - // for more rare bytes. - if self.count > 3 { - self.available = false; - return; - } - // If the pattern is too long, then our offset table is bunk, so - // give up. - if bytes.len() >= 256 { - self.available = false; - return; - } - let mut rarest = match bytes.get(0) { - None => return, - Some(&b) => (b, freq_rank(b)), - }; - // The idea here is to look for the rarest byte in each pattern, and - // add that to our set. As a special exception, if we see a byte that - // we've already added, then we immediately stop and choose that byte, - // even if there's another rare byte in the pattern. This helps us - // apply the rare byte optimization in more cases by attempting to pick - // bytes that are in common between patterns. So for example, if we - // were searching for `Sherlock` and `lockjaw`, then this would pick - // `k` for both patterns, resulting in the use of `memchr` instead of - // `memchr2` for `k` and `j`. - let mut found = false; - for (pos, &b) in bytes.iter().enumerate() { - self.set_offset(pos, b); - if found { - continue; - } - if self.rare_set.contains(b) { - found = true; - continue; - } - let rank = freq_rank(b); - if rank < rarest.1 { - rarest = (b, rank); - } - } - if !found { - self.add_rare_byte(rarest.0); - } - } - - fn set_offset(&mut self, pos: usize, byte: u8) { - // This unwrap is OK because pos is never bigger than our max. 
- let offset = RareByteOffset::new(pos).unwrap(); - self.byte_offsets.set(byte, offset); - if self.ascii_case_insensitive { - self.byte_offsets.set(opposite_ascii_case(byte), offset); - } - } - - fn add_rare_byte(&mut self, byte: u8) { - self.add_one_rare_byte(byte); - if self.ascii_case_insensitive { - self.add_one_rare_byte(opposite_ascii_case(byte)); - } - } - - fn add_one_rare_byte(&mut self, byte: u8) { - if self.rare_set.insert(byte) { - self.count += 1; - self.rank_sum += freq_rank(byte) as u16; - } - } -} - -/// A prefilter for scanning for a single "rare" byte. -#[derive(Clone, Debug)] -struct RareBytesOne { - byte1: u8, - offset: RareByteOffset, -} - -impl Prefilter for RareBytesOne { - fn next_candidate( - &self, - state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr(self.byte1, &haystack[at..]) - .map(|i| { - let pos = at + i; - state.last_scan_at = pos; - cmp::max(at, pos.saturating_sub(self.offset.max as usize)) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } - - fn looks_for_non_start_of_match(&self) -> bool { - // TODO: It should be possible to use a rare byte prefilter in a - // streaming context. The main problem is that we usually assume that - // if a prefilter has scanned some text and not found anything, then no - // match *starts* in that text. This doesn't matter in non-streaming - // contexts, but in a streaming context, if we're looking for a byte - // that doesn't start at the beginning of a match and don't find it, - // then it's still possible for a match to start at the end of the - // current buffer content. In order to fix this, the streaming searcher - // would need to become aware of prefilters that do this and use the - // appropriate offset in various places. 
It is quite a delicate change - // and probably shouldn't be attempted until streaming search has a - // better testing strategy. In particular, we'd really like to be able - // to vary the buffer size to force strange cases that occur at the - // edge of the buffer. If we make the buffer size minimal, then these - // cases occur more frequently and easier. - // - // This is also a bummer because this means that if the prefilter - // builder chose a rare byte prefilter, then a streaming search won't - // use any prefilter at all because the builder doesn't know how it's - // going to be used. Assuming we don't make streaming search aware of - // these special types of prefilters as described above, we could fix - // this by building a "backup" prefilter that could be used when the - // rare byte prefilter could not. But that's a bandaide. Sigh. - true - } -} - -/// A prefilter for scanning for two "rare" bytes. -#[derive(Clone, Debug)] -struct RareBytesTwo { - offsets: RareByteOffsets, - byte1: u8, - byte2: u8, -} - -impl Prefilter for RareBytesTwo { - fn next_candidate( - &self, - state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr2(self.byte1, self.byte2, &haystack[at..]) - .map(|i| { - let pos = at + i; - state.update_at(pos); - let offset = self.offsets.set[haystack[pos] as usize].max; - cmp::max(at, pos.saturating_sub(offset as usize)) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } - - fn looks_for_non_start_of_match(&self) -> bool { - // TODO: See Prefilter impl for RareBytesOne. - true - } -} - -/// A prefilter for scanning for three "rare" bytes. 
-#[derive(Clone, Debug)] -struct RareBytesThree { - offsets: RareByteOffsets, - byte1: u8, - byte2: u8, - byte3: u8, -} - -impl Prefilter for RareBytesThree { - fn next_candidate( - &self, - state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr3(self.byte1, self.byte2, self.byte3, &haystack[at..]) - .map(|i| { - let pos = at + i; - state.update_at(pos); - let offset = self.offsets.set[haystack[pos] as usize].max; - cmp::max(at, pos.saturating_sub(offset as usize)) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } - - fn looks_for_non_start_of_match(&self) -> bool { - // TODO: See Prefilter impl for RareBytesOne. - true - } -} - -/// A builder for constructing a starting byte prefilter. -/// -/// A starting byte prefilter is a simplistic prefilter that looks for possible -/// matches by reporting all positions corresponding to a particular byte. This -/// generally only takes affect when there are at most 3 distinct possible -/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two -/// distinct starting bytes (`f` and `b`), and this prefilter returns all -/// occurrences of either `f` or `b`. -/// -/// In some cases, a heuristic frequency analysis may determine that it would -/// be better not to use this prefilter even when there are 3 or fewer distinct -/// starting bytes. -#[derive(Clone, Debug)] -struct StartBytesBuilder { - /// Whether this prefilter should account for ASCII case insensitivity or - /// not. - ascii_case_insensitive: bool, - /// The set of starting bytes observed. - byteset: Vec, - /// The number of bytes set to true in `byteset`. - count: usize, - /// The sum of frequency ranks for the rare bytes detected. This is - /// intended to give a heuristic notion of how rare the bytes are. 
- rank_sum: u16, -} - -impl StartBytesBuilder { - /// Create a new builder for constructing a start byte prefilter. - fn new() -> StartBytesBuilder { - StartBytesBuilder { - ascii_case_insensitive: false, - byteset: vec![false; 256], - count: 0, - rank_sum: 0, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder { - self.ascii_case_insensitive = yes; - self - } - - /// Build the starting bytes prefilter. - /// - /// If there are more than 3 distinct starting bytes, or if heuristics - /// otherwise determine that this prefilter should not be used, then `None` - /// is returned. - fn build(&self) -> Option { - if self.count > 3 { - return None; - } - let (mut bytes, mut len) = ([0; 3], 0); - for b in 0..256 { - if !self.byteset[b] { - continue; - } - // We don't handle non-ASCII bytes for now. Getting non-ASCII - // bytes right is trickier, since we generally don't want to put - // a leading UTF-8 code unit into a prefilter that isn't ASCII, - // since they can frequently. Instead, it would be better to use a - // continuation byte, but this requires more sophisticated analysis - // of the automaton and a richer prefilter API. - if b > 0x7F { - return None; - } - bytes[len] = b as u8; - len += 1; - } - match len { - 0 => None, - 1 => Some(PrefilterObj::new(StartBytesOne { byte1: bytes[0] })), - 2 => Some(PrefilterObj::new(StartBytesTwo { - byte1: bytes[0], - byte2: bytes[1], - })), - 3 => Some(PrefilterObj::new(StartBytesThree { - byte1: bytes[0], - byte2: bytes[1], - byte3: bytes[2], - })), - _ => unreachable!(), - } - } - - /// Add a byte string to this builder. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. 
- fn add(&mut self, bytes: &[u8]) { - if self.count > 3 { - return; - } - if let Some(&byte) = bytes.get(0) { - self.add_one_byte(byte); - if self.ascii_case_insensitive { - self.add_one_byte(opposite_ascii_case(byte)); - } - } - } - - fn add_one_byte(&mut self, byte: u8) { - if !self.byteset[byte as usize] { - self.byteset[byte as usize] = true; - self.count += 1; - self.rank_sum += freq_rank(byte) as u16; - } - } -} - -/// A prefilter for scanning for a single starting byte. -#[derive(Clone, Debug)] -struct StartBytesOne { - byte1: u8, -} - -impl Prefilter for StartBytesOne { - fn next_candidate( - &self, - _state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr(self.byte1, &haystack[at..]) - .map(|i| at + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } -} - -/// A prefilter for scanning for two starting bytes. -#[derive(Clone, Debug)] -struct StartBytesTwo { - byte1: u8, - byte2: u8, -} - -impl Prefilter for StartBytesTwo { - fn next_candidate( - &self, - _state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr2(self.byte1, self.byte2, &haystack[at..]) - .map(|i| at + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } -} - -/// A prefilter for scanning for three starting bytes. 
-#[derive(Clone, Debug)] -struct StartBytesThree { - byte1: u8, - byte2: u8, - byte3: u8, -} - -impl Prefilter for StartBytesThree { - fn next_candidate( - &self, - _state: &mut PrefilterState, - haystack: &[u8], - at: usize, - ) -> Candidate { - memchr3(self.byte1, self.byte2, self.byte3, &haystack[at..]) - .map(|i| at + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } - - fn clone_prefilter(&self) -> Box { - Box::new(self.clone()) - } - - fn heap_bytes(&self) -> usize { - 0 - } -} - -/// Return the next candidate reported by the given prefilter while -/// simultaneously updating the given prestate. -/// -/// The caller is responsible for checking the prestate before deciding whether -/// to initiate a search. -#[inline] -pub fn next( - prestate: &mut PrefilterState, - prefilter: P, - haystack: &[u8], - at: usize, -) -> Candidate { - let cand = prefilter.next_candidate(prestate, haystack, at); - match cand { - Candidate::None => { - prestate.update_skipped_bytes(haystack.len() - at); - } - Candidate::Match(ref m) => { - prestate.update_skipped_bytes(m.start() - at); - } - Candidate::PossibleStartOfMatch(i) => { - prestate.update_skipped_bytes(i - at); - } - } - cand -} - -/// If the given byte is an ASCII letter, then return it in the opposite case. -/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns -/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned. -pub fn opposite_ascii_case(b: u8) -> u8 { - if b'A' <= b && b <= b'Z' { - b.to_ascii_lowercase() - } else if b'a' <= b && b <= b'z' { - b.to_ascii_uppercase() - } else { - b - } -} - -/// Return the frequency rank of the given byte. The higher the rank, the more -/// common the byte (heuristically speaking). 
-fn freq_rank(b: u8) -> u8 { - use crate::byte_frequencies::BYTE_FREQUENCIES; - BYTE_FREQUENCIES[b as usize] -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn scratch() { - let mut b = Builder::new(MatchKind::LeftmostFirst); - b.add(b"Sherlock"); - b.add(b"locjaw"); - // b.add(b"Sherlock"); - // b.add(b"Holmes"); - // b.add(b"Watson"); - // b.add("Шерлок Холмс".as_bytes()); - // b.add("Джон Уотсон".as_bytes()); - - let s = b.build().unwrap(); - println!("{:?}", s); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/state_id.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/state_id.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/state_id.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/state_id.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ -use std::fmt::Debug; -use std::hash::Hash; - -use crate::error::{Error, Result}; - -// NOTE: Most of this code was copied from regex-automata, but without the -// (de)serialization specific stuff. - -/// Check that the premultiplication of the given state identifier can -/// fit into the representation indicated by `S`. If it cannot, or if it -/// overflows `usize` itself, then an error is returned. -pub fn premultiply_overflow_error( - last_state: S, - alphabet_len: usize, -) -> Result<()> { - let requested = match last_state.to_usize().checked_mul(alphabet_len) { - Some(requested) => requested, - None => return Err(Error::premultiply_overflow(0, 0)), - }; - if requested > S::max_id() { - return Err(Error::premultiply_overflow(S::max_id(), requested)); - } - Ok(()) -} - -/// Convert the given `usize` to the chosen state identifier -/// representation. If the given value cannot fit in the chosen -/// representation, then an error is returned. 
-pub fn usize_to_state_id(value: usize) -> Result { - if value > S::max_id() { - Err(Error::state_id_overflow(S::max_id())) - } else { - Ok(S::from_usize(value)) - } -} - -/// Return the unique identifier for an automaton's fail state in the chosen -/// representation indicated by `S`. -pub fn fail_id() -> S { - S::from_usize(0) -} - -/// Return the unique identifier for an automaton's fail state in the chosen -/// representation indicated by `S`. -pub fn dead_id() -> S { - S::from_usize(1) -} - -mod private { - /// Sealed stops crates other than aho-corasick from implementing any - /// traits that use it. - pub trait Sealed {} - impl Sealed for u8 {} - impl Sealed for u16 {} - impl Sealed for u32 {} - impl Sealed for u64 {} - impl Sealed for usize {} -} - -/// A trait describing the representation of an automaton's state identifier. -/// -/// The purpose of this trait is to safely express both the possible state -/// identifier representations that can be used in an automaton and to convert -/// between state identifier representations and types that can be used to -/// efficiently index memory (such as `usize`). -/// -/// In general, one should not need to implement this trait explicitly. Indeed, -/// for now, this trait is sealed such that it cannot be implemented by any -/// other type. In particular, this crate provides implementations for `u8`, -/// `u16`, `u32`, `u64` and `usize`. (`u32` and `u64` are only provided for -/// targets that can represent all corresponding values in a `usize`.) -pub trait StateID: - private::Sealed - + Clone - + Copy - + Debug - + Eq - + Hash - + PartialEq - + PartialOrd - + Ord -{ - /// Convert from a `usize` to this implementation's representation. - /// - /// Implementors may assume that `n <= Self::max_id`. That is, implementors - /// do not need to check whether `n` can fit inside this implementation's - /// representation. - fn from_usize(n: usize) -> Self; - - /// Convert this implementation's representation to a `usize`. 
- /// - /// Implementors must not return a `usize` value greater than - /// `Self::max_id` and must not permit overflow when converting between the - /// implementor's representation and `usize`. In general, the preferred - /// way for implementors to achieve this is to simply not provide - /// implementations of `StateID` that cannot fit into the target platform's - /// `usize`. - fn to_usize(self) -> usize; - - /// Return the maximum state identifier supported by this representation. - /// - /// Implementors must return a correct bound. Doing otherwise may result - /// in unspecified behavior (but will not violate memory safety). - fn max_id() -> usize; -} - -impl StateID for usize { - #[inline] - fn from_usize(n: usize) -> usize { - n - } - - #[inline] - fn to_usize(self) -> usize { - self - } - - #[inline] - fn max_id() -> usize { - ::std::usize::MAX - } -} - -impl StateID for u8 { - #[inline] - fn from_usize(n: usize) -> u8 { - n as u8 - } - - #[inline] - fn to_usize(self) -> usize { - self as usize - } - - #[inline] - fn max_id() -> usize { - ::std::u8::MAX as usize - } -} - -impl StateID for u16 { - #[inline] - fn from_usize(n: usize) -> u16 { - n as u16 - } - - #[inline] - fn to_usize(self) -> usize { - self as usize - } - - #[inline] - fn max_id() -> usize { - ::std::u16::MAX as usize - } -} - -#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] -impl StateID for u32 { - #[inline] - fn from_usize(n: usize) -> u32 { - n as u32 - } - - #[inline] - fn to_usize(self) -> usize { - self as usize - } - - #[inline] - fn max_id() -> usize { - ::std::u32::MAX as usize - } -} - -#[cfg(target_pointer_width = "64")] -impl StateID for u64 { - #[inline] - fn from_usize(n: usize) -> u64 { - n as u64 - } - - #[inline] - fn to_usize(self) -> usize { - self as usize - } - - #[inline] - fn max_id() -> usize { - ::std::u64::MAX as usize - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/tests.rs 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/tests.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/tests.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/src/tests.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1254 +0,0 @@ -use std::collections::HashMap; -use std::io; -use std::usize; - -use crate::{AhoCorasickBuilder, Match, MatchKind}; - -/// A description of a single test against an Aho-Corasick automaton. -/// -/// A single test may not necessarily pass on every configuration of an -/// Aho-Corasick automaton. The tests are categorized and grouped appropriately -/// below. -#[derive(Clone, Debug, Eq, PartialEq)] -struct SearchTest { - /// The name of this test, for debugging. - name: &'static str, - /// The patterns to search for. - patterns: &'static [&'static str], - /// The text to search. - haystack: &'static str, - /// Each match is a triple of (pattern_index, start, end), where - /// pattern_index is an index into `patterns` and `start`/`end` are indices - /// into `haystack`. - matches: &'static [(usize, usize, usize)], -} - -/// Short-hand constructor for SearchTest. We use it a lot below. -macro_rules! t { - ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { - SearchTest { - name: stringify!($name), - patterns: $patterns, - haystack: $haystack, - matches: $matches, - } - }; -} - -/// A collection of test groups. -type TestCollection = &'static [&'static [SearchTest]]; - -// Define several collections corresponding to the different type of match -// semantics supported by Aho-Corasick. These collections have some overlap, -// but each collection should have some tests that no other collection has. - -/// Tests for Aho-Corasick's standard non-overlapping match semantics. 
-const AC_STANDARD_NON_OVERLAPPING: TestCollection = - &[BASICS, NON_OVERLAPPING, STANDARD, REGRESSION]; - -/// Tests for Aho-Corasick's anchored standard non-overlapping match semantics. -const AC_STANDARD_ANCHORED_NON_OVERLAPPING: TestCollection = - &[ANCHORED_BASICS, ANCHORED_NON_OVERLAPPING, STANDARD_ANCHORED]; - -/// Tests for Aho-Corasick's standard overlapping match semantics. -const AC_STANDARD_OVERLAPPING: TestCollection = - &[BASICS, OVERLAPPING, REGRESSION]; - -/// Tests for Aho-Corasick's anchored standard overlapping match semantics. -const AC_STANDARD_ANCHORED_OVERLAPPING: TestCollection = - &[ANCHORED_BASICS, ANCHORED_OVERLAPPING]; - -/// Tests for Aho-Corasick's leftmost-first match semantics. -const AC_LEFTMOST_FIRST: TestCollection = - &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_FIRST, REGRESSION]; - -/// Tests for Aho-Corasick's anchored leftmost-first match semantics. -const AC_LEFTMOST_FIRST_ANCHORED: TestCollection = &[ - ANCHORED_BASICS, - ANCHORED_NON_OVERLAPPING, - ANCHORED_LEFTMOST, - ANCHORED_LEFTMOST_FIRST, -]; - -/// Tests for Aho-Corasick's leftmost-longest match semantics. -const AC_LEFTMOST_LONGEST: TestCollection = - &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_LONGEST, REGRESSION]; - -/// Tests for Aho-Corasick's anchored leftmost-longest match semantics. -const AC_LEFTMOST_LONGEST_ANCHORED: TestCollection = &[ - ANCHORED_BASICS, - ANCHORED_NON_OVERLAPPING, - ANCHORED_LEFTMOST, - ANCHORED_LEFTMOST_LONGEST, -]; - -// Now define the individual tests that make up the collections above. - -/// A collection of tests for the Aho-Corasick algorithm that should always be -/// true regardless of match semantics. That is, all combinations of -/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} -/// should produce the same answer. 
-const BASICS: &'static [SearchTest] = &[ - t!(basic000, &[], "", &[]), - t!(basic001, &["a"], "", &[]), - t!(basic010, &["a"], "a", &[(0, 0, 1)]), - t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), - t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), - t!(basic050, &["a"], "bba", &[(0, 2, 3)]), - t!(basic060, &["a"], "bbb", &[]), - t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), - t!(basic100, &["aa"], "", &[]), - t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), - t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), - t!(basic130, &["aa"], "abbab", &[]), - t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), - t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), - t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), - t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), - t!(basic300, &["a", "b"], "", &[]), - t!(basic310, &["a", "b"], "z", &[]), - t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), - t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), - t!( - basic340, - &["a", "b"], - "abba", - &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] - ), - t!( - basic350, - &["b", "a"], - "abba", - &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] - ), - t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), - t!(basic400, &["foo", "bar"], "", &[]), - t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), - t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), - t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), - t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), - t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), - t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), - t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), - t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), - t!(basic600, &[""], "", &[(0, 0, 0)]), - t!(basic610, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(basic620, &[""], "abc", &[(0, 0, 0), (0, 1, 1), 
(0, 2, 2), (0, 3, 3)]), - t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), - t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]), - t!( - basic720, - &["yabcdef", "bcdeyabc", "abcdezghi"], - "yabcdezghi", - &[(2, 1, 10),] - ), -]; - -/// A collection of *anchored* tests for the Aho-Corasick algorithm that should -/// always be true regardless of match semantics. That is, all combinations of -/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} should -/// produce the same answer. -const ANCHORED_BASICS: &'static [SearchTest] = &[ - t!(abasic000, &[], "", &[]), - t!(abasic010, &[""], "", &[(0, 0, 0)]), - t!(abasic020, &[""], "a", &[(0, 0, 0)]), - t!(abasic030, &[""], "abc", &[(0, 0, 0)]), - t!(abasic100, &["a"], "a", &[(0, 0, 1)]), - t!(abasic110, &["a"], "aa", &[(0, 0, 1)]), - t!(abasic120, &["a", "b"], "ab", &[(0, 0, 1)]), - t!(abasic130, &["a", "b"], "ba", &[(1, 0, 1)]), - t!(abasic140, &["foo", "foofoo"], "foo", &[(0, 0, 3)]), - t!(abasic150, &["foofoo", "foo"], "foo", &[(1, 0, 3)]), -]; - -/// Tests for non-overlapping standard match semantics. -/// -/// These tests generally shouldn't pass for leftmost-{first,longest}, although -/// some do in order to write clearer tests. For example, standard000 will -/// pass with leftmost-first semantics, but standard010 will not. We write -/// both to emphasize how the match semantics work. 
-const STANDARD: &'static [SearchTest] = &[ - t!(standard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(standard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), - t!(standard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), - t!(standard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), - t!(standard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]), - t!( - standard400, - &["abcd", "bcd", "cd", "b"], - "abcd", - &[(3, 1, 2), (2, 2, 4),] - ), - t!(standard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(standard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2),]), - t!(standard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(standard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1),]), - t!(standard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), -]; - -/// Like STANDARD, but for anchored searches. -const STANDARD_ANCHORED: &'static [SearchTest] = &[ - t!(astandard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(astandard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), - t!(astandard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), - t!(astandard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), - t!(astandard040, &["a", ""], "a", &[(1, 0, 0)]), - t!(astandard050, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), - t!(astandard410, &["", "a"], "a", &[(0, 0, 0)]), - t!(astandard420, &["", "a"], "aa", &[(0, 0, 0)]), - t!(astandard430, &["", "a", ""], "a", &[(0, 0, 0)]), - t!(astandard440, &["a", "", ""], "a", &[(1, 0, 0)]), - t!(astandard450, &["", "", "a"], "a", &[(0, 0, 0)]), -]; - -/// Tests for non-overlapping leftmost match semantics. These should pass for -/// both leftmost-first and leftmost-longest match kinds. Stated differently, -/// among ambiguous matches, the longest match and the match that appeared -/// first when constructing the automaton should always be the same. 
-const LEFTMOST: &'static [SearchTest] = &[ - t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftmost010, &["a", ""], "a", &[(0, 0, 1), (1, 1, 1)]), - t!(leftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), - t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), - t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), - t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - leftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8), (0, 8, 9),] - ), -]; - -/// Like LEFTMOST, but for anchored searches. 
-const ANCHORED_LEFTMOST: &'static [SearchTest] = &[ - t!(aleftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(aleftmost010, &["a", ""], "a", &[(0, 0, 1)]), - t!(aleftmost020, &["", ""], "a", &[(0, 0, 0)]), - t!(aleftmost030, &["a", "ab"], "aa", &[(0, 0, 1)]), - t!(aleftmost031, &["ab", "a"], "aa", &[(1, 0, 1)]), - t!(aleftmost032, &["ab", "a"], "xayabbbz", &[]), - t!(aleftmost300, &["abcd", "bce", "b"], "abce", &[]), - t!(aleftmost310, &["abcd", "ce", "bc"], "abce", &[]), - t!(aleftmost320, &["abcd", "bce", "ce", "b"], "abce", &[]), - t!(aleftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[]), - t!(aleftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(aleftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - aleftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - aleftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - aleftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8)] - ), -]; - -/// Tests for non-overlapping leftmost-first match semantics. These tests -/// should generally be specific to leftmost-first, which means they should -/// generally fail under leftmost-longest semantics. 
-const LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(leftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(leftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(leftfirst012, &["a", "", ""], "a", &[(0, 0, 1), (1, 1, 1),]), - t!(leftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), - t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), - t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), - t!( - leftfirst310, - &["abcd", "b", "bce", "ce"], - "abce", - &[(1, 1, 2), (3, 2, 4),] - ), - t!( - leftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1), (2, 7, 9),] - ), - t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), - t!(leftfirst400, &["amwix", "samwise", "sam"], "Zsamwix", &[(2, 1, 4)]), -]; - -/// Like LEFTMOST_FIRST, but for anchored searches. 
-const ANCHORED_LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(aleftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(aleftfirst010, &["", "a"], "a", &[(0, 0, 0)]), - t!(aleftfirst011, &["", "a", ""], "a", &[(0, 0, 0)]), - t!(aleftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(aleftfirst013, &["", "", "a"], "a", &[(0, 0, 0)]), - t!(aleftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(aleftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(aleftfirst040, &["a", "ab"], "xayabbbz", &[]), - t!(aleftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), - t!(aleftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), - t!(aleftfirst300, &["abcd", "b", "bce"], "abce", &[]), - t!(aleftfirst310, &["abcd", "b", "bce", "ce"], "abce", &[]), - t!( - aleftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1)] - ), - t!(aleftfirst330, &["a", "abab"], "abab", &[(0, 0, 1)]), - t!(aleftfirst400, &["wise", "samwise", "sam"], "samwix", &[(2, 0, 3)]), -]; - -/// Tests for non-overlapping leftmost-longest match semantics. These tests -/// should generally be specific to leftmost-longest, which means they should -/// generally fail under leftmost-first semantics. 
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(leftlong020, &["", "a"], "a", &[(1, 0, 1), (0, 1, 1),]), - t!(leftlong021, &["", "a", ""], "a", &[(1, 0, 1), (0, 1, 1),]), - t!(leftlong022, &["a", "", ""], "a", &[(0, 0, 1), (1, 1, 1),]), - t!(leftlong023, &["", "", "a"], "a", &[(2, 0, 1), (0, 1, 1),]), - t!(leftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2), (0, 2, 2),]), - t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), - t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), - t!( - leftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), - t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), -]; - -/// Like LEFTMOST_LONGEST, but for anchored searches. 
-const ANCHORED_LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(aleftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(aleftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(aleftlong020, &["", "a"], "a", &[(1, 0, 1)]), - t!(aleftlong021, &["", "a", ""], "a", &[(1, 0, 1)]), - t!(aleftlong022, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(aleftlong023, &["", "", "a"], "a", &[(2, 0, 1)]), - t!(aleftlong030, &["", "a"], "aa", &[(1, 0, 1)]), - t!(aleftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(aleftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(aleftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(aleftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(aleftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), - t!(aleftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), - t!(aleftlong300, &["abcd", "b", "bce"], "abce", &[]), - t!( - aleftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(aleftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(aleftlong330, &["abcd", "b", "ce"], "abce", &[]), - t!(aleftlong340, &["a", "ab"], "xayabbbz", &[]), -]; - -/// Tests for non-overlapping match semantics. -/// -/// Generally these tests shouldn't pass when using overlapping semantics. -/// These should pass for both standard and leftmost match semantics. -const NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(nover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(nover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(nover030, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), - t!( - nover100, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] - ), - t!(nover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), - t!(nover300, &["", ""], "", &[(0, 0, 0),]), - t!(nover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1),]), -]; - -/// Like NON_OVERLAPPING, but for anchored searches. 
-const ANCHORED_NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(anover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(anover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(anover030, &["abc", "bc"], "zazabcz", &[]), - t!(anover100, &["ab", "ba"], "abababa", &[(0, 0, 2)]), - t!(anover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3)]), - t!(anover300, &["", ""], "", &[(0, 0, 0),]), - t!(anover310, &["", ""], "a", &[(0, 0, 0)]), -]; - -/// Tests for overlapping match semantics. -/// -/// This only supports standard match semantics, since leftmost-{first,longest} -/// do not support overlapping matches. -const OVERLAPPING: &'static [SearchTest] = &[ - t!( - over000, - &["abcd", "bcd", "cd", "b"], - "abcd", - &[(3, 1, 2), (0, 0, 4), (1, 1, 4), (2, 2, 4),] - ), - t!( - over010, - &["bcd", "cd", "b", "abcd"], - "abcd", - &[(2, 1, 2), (3, 0, 4), (0, 1, 4), (1, 2, 4),] - ), - t!( - over020, - &["abcd", "bcd", "cd"], - "abcd", - &[(0, 0, 4), (1, 1, 4), (2, 2, 4),] - ), - t!( - over030, - &["bcd", "abcd", "cd"], - "abcd", - &[(1, 0, 4), (0, 1, 4), (2, 2, 4),] - ), - t!( - over040, - &["bcd", "cd", "abcd"], - "abcd", - &[(2, 0, 4), (0, 1, 4), (1, 2, 4),] - ), - t!(over050, &["abc", "bc"], "zazabcz", &[(0, 3, 6), (1, 4, 6),]), - t!( - over100, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (1, 1, 3), (0, 2, 4), (1, 3, 5), (0, 4, 6), (1, 5, 7),] - ), - t!( - over200, - &["foo", "foo"], - "foobarfoo", - &[(0, 0, 3), (1, 0, 3), (0, 6, 9), (1, 6, 9),] - ), - t!(over300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), - t!( - over310, - &["", ""], - "a", - &[(0, 0, 0), (1, 0, 0), (0, 1, 1), (1, 1, 1),] - ), - t!(over320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1), (0, 1, 1),]), - t!( - over330, - &["", "a", ""], - "a", - &[(0, 0, 0), (2, 0, 0), (1, 0, 1), (0, 1, 1), (2, 1, 1),] - ), - t!( - over340, - &["a", "", ""], - "a", - &[(1, 0, 0), (2, 0, 0), (0, 0, 1), (1, 1, 1), (2, 1, 1),] - ), - t!( - over350, - &["", "", "a"], - "a", - &[(0, 0, 0), (1, 0, 0), (2, 0, 1), (0, 1, 
1), (1, 1, 1),] - ), - t!( - over360, - &["foo", "foofoo"], - "foofoo", - &[(0, 0, 3), (1, 0, 6), (0, 3, 6)] - ), -]; - -/// Like OVERLAPPING, but for anchored searches. -const ANCHORED_OVERLAPPING: &'static [SearchTest] = &[ - t!(aover000, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), - t!(aover010, &["bcd", "cd", "b", "abcd"], "abcd", &[(3, 0, 4)]), - t!(aover020, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4)]), - t!(aover030, &["bcd", "abcd", "cd"], "abcd", &[(1, 0, 4)]), - t!(aover040, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4)]), - t!(aover050, &["abc", "bc"], "zazabcz", &[]), - t!(aover100, &["ab", "ba"], "abababa", &[(0, 0, 2)]), - t!(aover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (1, 0, 3)]), - t!(aover300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), - t!(aover310, &["", ""], "a", &[(0, 0, 0), (1, 0, 0)]), - t!(aover320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1)]), - t!(aover330, &["", "a", ""], "a", &[(0, 0, 0), (2, 0, 0), (1, 0, 1)]), - t!(aover340, &["a", "", ""], "a", &[(1, 0, 0), (2, 0, 0), (0, 0, 1)]), - t!(aover350, &["", "", "a"], "a", &[(0, 0, 0), (1, 0, 0), (2, 0, 1)]), - t!(aover360, &["foo", "foofoo"], "foofoo", &[(0, 0, 3), (1, 0, 6)]), -]; - -/// Tests for ASCII case insensitivity. -/// -/// These tests should all have the same behavior regardless of match semantics -/// or whether the search is overlapping. -const ASCII_CASE_INSENSITIVE: &'static [SearchTest] = &[ - t!(acasei000, &["a"], "A", &[(0, 0, 1)]), - t!(acasei010, &["Samwise"], "SAMWISE", &[(0, 0, 7)]), - t!(acasei011, &["Samwise"], "SAMWISE.abcd", &[(0, 0, 7)]), - t!(acasei020, &["fOoBaR"], "quux foobar baz", &[(0, 5, 11)]), -]; - -/// Like ASCII_CASE_INSENSITIVE, but specifically for non-overlapping tests. 
-const ASCII_CASE_INSENSITIVE_NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3)]), - t!(acasei000, &["FOO", "foo"], "fOo", &[(0, 0, 3)]), - t!(acasei010, &["abc", "def"], "abcdef", &[(0, 0, 3), (1, 3, 6)]), -]; - -/// Like ASCII_CASE_INSENSITIVE, but specifically for overlapping tests. -const ASCII_CASE_INSENSITIVE_OVERLAPPING: &'static [SearchTest] = &[ - t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3), (1, 0, 3)]), - t!(acasei001, &["FOO", "foo"], "fOo", &[(0, 0, 3), (1, 0, 3)]), - // This is a regression test from: - // https://github.com/BurntSushi/aho-corasick/issues/68 - // Previously, it was reporting a duplicate (1, 3, 6) match. - t!( - acasei010, - &["abc", "def", "abcdef"], - "abcdef", - &[(0, 0, 3), (2, 0, 6), (1, 3, 6)] - ), -]; - -/// Regression tests that are applied to all Aho-Corasick combinations. -/// -/// If regression tests are needed for specific match semantics, then add them -/// to the appropriate group above. -const REGRESSION: &'static [SearchTest] = &[ - t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), - t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), - t!( - regression030, - &["libcore/", "libstd/"], - "libcore/char/methods.rs", - &[(0, 0, 8),] - ), - t!( - regression040, - &["libstd/", "libcore/"], - "libcore/char/methods.rs", - &[(1, 0, 8),] - ), - t!( - regression050, - &["\x00\x00\x01", "\x00\x00\x00"], - "\x00\x00\x00", - &[(1, 0, 3),] - ), - t!( - regression060, - &["\x00\x00\x00", "\x00\x00\x01"], - "\x00\x00\x00", - &[(0, 0, 3),] - ), -]; - -// Now define a test for each combination of things above that we want to run. -// Since there are a few different combinations for each collection of tests, -// we define a couple of macros to avoid repetition drudgery. The testconfig -// macro constructs the automaton from a given match kind, and runs the search -// tests one-by-one over the given collection. 
The `with` parameter allows one -// to configure the builder with additional parameters. The testcombo macro -// invokes testconfig in precisely this way: it sets up several tests where -// each one turns a different knob on AhoCorasickBuilder. - -macro_rules! testconfig { - (overlapping, $name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut builder = AhoCorasickBuilder::new(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .find_overlapping_iter(test.haystack) - .collect() - }); - } - }; - (stream, $name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let buf = - io::BufReader::with_capacity(1, test.haystack.as_bytes()); - let mut builder = AhoCorasickBuilder::new(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .stream_find_iter(buf) - .map(|result| result.unwrap()) - .collect() - }); - } - }; - ($name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut builder = AhoCorasickBuilder::new(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .find_iter(test.haystack) - .collect() - }); - } - }; -} - -macro_rules! 
testcombo { - ($name:ident, $collection:expr, $kind:ident) => { - mod $name { - use super::*; - - testconfig!(nfa_default, $collection, $kind, |_| ()); - testconfig!( - nfa_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.prefilter(false); - } - ); - testconfig!( - nfa_all_sparse, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dense_depth(0); - } - ); - testconfig!( - nfa_all_dense, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dense_depth(usize::MAX); - } - ); - testconfig!( - dfa_default, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dfa(true); - } - ); - testconfig!( - dfa_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dfa(true).prefilter(false); - } - ); - testconfig!( - dfa_all_sparse, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dfa(true).dense_depth(0); - } - ); - testconfig!( - dfa_all_dense, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.dfa(true).dense_depth(usize::MAX); - } - ); - testconfig!( - dfa_no_byte_class, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when option is removed. - #[allow(deprecated)] - b.dfa(true).byte_classes(false); - } - ); - testconfig!( - dfa_no_premultiply, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when option is removed. - #[allow(deprecated)] - b.dfa(true).premultiply(false); - } - ); - testconfig!( - dfa_no_byte_class_no_premultiply, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when options are removed. - #[allow(deprecated)] - b.dfa(true).byte_classes(false).premultiply(false); - } - ); - } - }; -} - -// Write out the combinations. 
-testcombo!(search_leftmost_longest, AC_LEFTMOST_LONGEST, LeftmostLongest); -testcombo!(search_leftmost_first, AC_LEFTMOST_FIRST, LeftmostFirst); -testcombo!( - search_standard_nonoverlapping, - AC_STANDARD_NON_OVERLAPPING, - Standard -); - -// Write out the overlapping combo by hand since there is only one of them. -testconfig!( - overlapping, - search_standard_overlapping_nfa_default, - AC_STANDARD_OVERLAPPING, - Standard, - |_| () -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_all_sparse, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dense_depth(0); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_all_dense, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dense_depth(usize::MAX); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_default, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dfa(true); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_all_sparse, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dfa(true).dense_depth(0); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_all_dense, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dfa(true).dense_depth(usize::MAX); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_no_byte_class, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when option is removed. - #[allow(deprecated)] - b.dfa(true).byte_classes(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_no_premultiply, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when option is removed. 
- #[allow(deprecated)] - b.dfa(true).premultiply(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_no_byte_class_no_premultiply, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - // TODO: remove tests when options are removed. - #[allow(deprecated)] - b.dfa(true).byte_classes(false).premultiply(false); - } -); - -// Also write out tests manually for streams, since we only test the standard -// match semantics. We also don't bother testing different automaton -// configurations, since those are well covered by tests above. -testconfig!( - stream, - search_standard_stream_nfa_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |_| () -); -testconfig!( - stream, - search_standard_stream_dfa_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.dfa(true); - } -); - -// Same thing for anchored searches. Write them out manually. -testconfig!( - search_standard_anchored_nfa_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.anchored(true); - } -); -testconfig!( - search_standard_anchored_dfa_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.anchored(true).dfa(true); - } -); -testconfig!( - overlapping, - search_standard_anchored_overlapping_nfa_default, - AC_STANDARD_ANCHORED_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.anchored(true); - } -); -testconfig!( - overlapping, - search_standard_anchored_overlapping_dfa_default, - AC_STANDARD_ANCHORED_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.anchored(true).dfa(true); - } -); -testconfig!( - search_leftmost_first_anchored_nfa_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.anchored(true); - } -); -testconfig!( - search_leftmost_first_anchored_dfa_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.anchored(true).dfa(true); - 
} -); -testconfig!( - search_leftmost_longest_anchored_nfa_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.anchored(true); - } -); -testconfig!( - search_leftmost_longest_anchored_dfa_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.anchored(true).dfa(true); - } -); - -// And also write out the test combinations for ASCII case insensitivity. -testconfig!( - acasei_standard_nfa_default, - &[ASCII_CASE_INSENSITIVE], - Standard, - |b: &mut AhoCorasickBuilder| { - b.prefilter(false).ascii_case_insensitive(true); - } -); -testconfig!( - acasei_standard_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true).dfa(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_nfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true).dfa(true); - } -); -testconfig!( - acasei_leftmost_first_nfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_first_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true).dfa(true); - } -); -testconfig!( - acasei_leftmost_longest_nfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_longest_dfa_default, - 
&[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true).dfa(true); - } -); - -fn run_search_tests Vec>( - which: TestCollection, - mut f: F, -) { - let get_match_triples = - |matches: Vec| -> Vec<(usize, usize, usize)> { - matches - .into_iter() - .map(|m| (m.pattern(), m.start(), m.end())) - .collect() - }; - for &tests in which { - for test in tests { - assert_eq!( - test.matches, - get_match_triples(f(&test)).as_slice(), - "test: {}, patterns: {:?}, haystack: {:?}", - test.name, - test.patterns, - test.haystack - ); - } - } -} - -#[test] -fn search_tests_have_unique_names() { - let assert = |constname, tests: &[SearchTest]| { - let mut seen = HashMap::new(); // map from test name to position - for (i, test) in tests.iter().enumerate() { - if !seen.contains_key(test.name) { - seen.insert(test.name, i); - } else { - let last = seen[test.name]; - panic!( - "{} tests have duplicate names at positions {} and {}", - constname, last, i - ); - } - } - }; - assert("BASICS", BASICS); - assert("STANDARD", STANDARD); - assert("LEFTMOST", LEFTMOST); - assert("LEFTMOST_FIRST", LEFTMOST_FIRST); - assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); - assert("NON_OVERLAPPING", NON_OVERLAPPING); - assert("OVERLAPPING", OVERLAPPING); - assert("REGRESSION", REGRESSION); -} - -#[test] -#[should_panic] -fn stream_not_allowed_leftmost_first() { - let fsm = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostFirst) - .build(None::); - assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); -} - -#[test] -#[should_panic] -fn stream_not_allowed_leftmost_longest() { - let fsm = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostLongest) - .build(None::); - assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); -} - -#[test] -#[should_panic] -fn overlapping_not_allowed_leftmost_first() { - let fsm = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostFirst) - .build(None::); - 
assert_eq!(fsm.find_overlapping_iter("").count(), 0); -} - -#[test] -#[should_panic] -fn overlapping_not_allowed_leftmost_longest() { - let fsm = AhoCorasickBuilder::new() - .match_kind(MatchKind::LeftmostLongest) - .build(None::); - assert_eq!(fsm.find_overlapping_iter("").count(), 0); -} - -#[test] -fn state_id_too_small() { - let mut patterns = vec![]; - for c1 in (b'a'..b'z').map(|b| b as char) { - for c2 in (b'a'..b'z').map(|b| b as char) { - for c3 in (b'a'..b'z').map(|b| b as char) { - patterns.push(format!("{}{}{}", c1, c2, c3)); - } - } - } - let result = - AhoCorasickBuilder::new().build_with_size::(&patterns); - assert!(result.is_err()); -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/44 -// -// In short, this test ensures that enabling ASCII case insensitivity does not -// visit an exponential number of states when filling in failure transitions. -#[test] -fn regression_ascii_case_insensitive_no_exponential() { - let ac = AhoCorasickBuilder::new() - .ascii_case_insensitive(true) - .build(&["Tsubaki House-Triple Shot Vol01校花三姐妹"]); - assert!(ac.find("").is_none()); -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/53 -// -// This test ensures that the rare byte prefilter works in a particular corner -// case. In particular, the shift offset detected for '/' in the patterns below -// was incorrect, leading to a false negative. 
-#[test] -fn regression_rare_byte_prefilter() { - use crate::AhoCorasick; - - let ac = AhoCorasick::new_auto_configured(&["ab/j/", "x/"]); - assert!(ac.is_match("ab/j/")); -} - -#[test] -fn regression_case_insensitive_prefilter() { - use crate::AhoCorasickBuilder; - - for c in b'a'..b'z' { - for c2 in b'a'..b'z' { - let c = c as char; - let c2 = c2 as char; - let needle = format!("{}{}", c, c2).to_lowercase(); - let haystack = needle.to_uppercase(); - let ac = AhoCorasickBuilder::new() - .ascii_case_insensitive(true) - .prefilter(true) - .build(&[&needle]); - assert_eq!( - 1, - ac.find_iter(&haystack).count(), - "failed to find {:?} in {:?}\n\nautomaton:\n{:?}", - needle, - haystack, - ac, - ); - } - } -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/64 -// -// This occurs when the rare byte prefilter is active. -#[test] -fn regression_stream_rare_byte_prefilter() { - use std::io::Read; - - // NOTE: The test only fails if this ends with j. - const MAGIC: [u8; 5] = *b"1234j"; - - // NOTE: The test fails for value in 8188..=8191 These value put the string - // to search accross two call to read because the buffer size is 8192 by - // default. - const BEGIN: usize = 8191; - - /// This is just a structure that implements Reader. The reader - /// implementation will simulate a file filled with 0, except for the MAGIC - /// string at offset BEGIN. 
- #[derive(Default)] - struct R { - read: usize, - } - - impl Read for R { - fn read(&mut self, buf: &mut [u8]) -> ::std::io::Result { - //dbg!(buf.len()); - if self.read > 100000 { - return Ok(0); - } - let mut from = 0; - if self.read < BEGIN { - from = buf.len().min(BEGIN - self.read); - for x in 0..from { - buf[x] = 0; - } - self.read += from; - } - if self.read >= BEGIN && self.read <= BEGIN + MAGIC.len() { - let to = buf.len().min(BEGIN + MAGIC.len() - self.read + from); - if to > from { - buf[from..to].copy_from_slice( - &MAGIC - [self.read - BEGIN..self.read - BEGIN + to - from], - ); - self.read += to - from; - from = to; - } - } - for x in from..buf.len() { - buf[x] = 0; - self.read += 1; - } - Ok(buf.len()) - } - } - - fn run() -> ::std::io::Result<()> { - let aut = AhoCorasickBuilder::new().build(&[&MAGIC]); - - // While reading from a vector, it works: - let mut buf = vec![]; - R::default().read_to_end(&mut buf)?; - let from_whole = aut.find_iter(&buf).next().unwrap().start(); - - //But using stream_find_iter fails! - let mut file = R::default(); - let begin = aut - .stream_find_iter(&mut file) - .next() - .expect("NOT FOUND!!!!")? // Panic here - .start(); - assert_eq!(from_whole, begin); - Ok(()) - } - - run().unwrap() -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/UNLICENSE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/UNLICENSE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/UNLICENSE 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/aho-corasick/UNLICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. 
- -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/build.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/build.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/build.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/build.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,76 +1,15 @@ -mod target { - use std::env; - use std::fs::File; - use std::io::Write; - use std::path::{Path, PathBuf}; - - pub fn main() { - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - - let mut dst = - File::create(Path::new(&out_dir).join("host-target.txt")).unwrap(); - dst.write_all(env::var("TARGET").unwrap().as_bytes()) - .unwrap(); - } -} - -mod testgen { - use std::char; - use std::env; - use std::ffi::OsStr; - use std::fs::{self, File}; - use std::io::Write; - use std::path::{Path, PathBuf}; - - pub fn main() { - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - let mut dst = - File::create(Path::new(&out_dir).join("tests.rs")).unwrap(); - - let manifest_dir = - 
PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); - let headers_dir = manifest_dir.join("tests").join("headers"); - - let headers = match fs::read_dir(headers_dir) { - Ok(dir) => dir, - // We may not have headers directory after packaging. - Err(..) => return, - }; - - let entries = - headers.map(|result| result.expect("Couldn't read header file")); - - println!("cargo:rerun-if-changed=tests/headers"); - - for entry in entries { - match entry.path().extension().and_then(OsStr::to_str) { - Some("h") | Some("hpp") => { - let func = entry - .file_name() - .to_str() - .unwrap() - .replace(|c| !char::is_alphanumeric(c), "_") - .replace("__", "_") - .to_lowercase(); - writeln!( - dst, - "test_header!(header_{}, {:?});", - func, - entry.path(), - ) - .unwrap(); - } - _ => {} - } - } - - dst.flush().unwrap(); - } -} +use std::env; +use std::fs::File; +use std::io::Write; +use std::path::{Path, PathBuf}; fn main() { - target::main(); - testgen::main(); + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + let mut dst = + File::create(Path::new(&out_dir).join("host-target.txt")).unwrap(); + dst.write_all(env::var("TARGET").unwrap().as_bytes()) + .unwrap(); // On behalf of clang_sys, rebuild ourselves if important configuration // variables change, to ensure that bindings get rebuilt if the @@ -85,6 +24,6 @@ ); println!( "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", - std::env::var("TARGET").unwrap().replace("-", "_") + std::env::var("TARGET").unwrap().replace('-', "_") ); } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/callbacks.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/callbacks.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/callbacks.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/callbacks.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,178 @@ +//! A public API for more fine-grained customization of bindgen behavior. 
+ +pub use crate::ir::analysis::DeriveTrait; +pub use crate::ir::derive::CanDerive as ImplementsTrait; +pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue}; +pub use crate::ir::int::IntKind; +use std::fmt; + +/// An enum to allow ignoring parsing of macros. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MacroParsingBehavior { + /// Ignore the macro, generating no code for it, or anything that depends on + /// it. + Ignore, + /// The default behavior bindgen would have otherwise. + Default, +} + +impl Default for MacroParsingBehavior { + fn default() -> Self { + MacroParsingBehavior::Default + } +} + +/// A trait to allow configuring different kinds of types in different +/// situations. +pub trait ParseCallbacks: fmt::Debug { + #[cfg(feature = "__cli")] + #[doc(hidden)] + fn cli_args(&self) -> Vec { + vec![] + } + + /// This function will be run on every macro that is identified. + fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior { + MacroParsingBehavior::Default + } + + /// This function will run for every extern variable and function. The returned value determines + /// the name visible in the bindings. + fn generated_name_override( + &self, + _item_info: ItemInfo<'_>, + ) -> Option { + None + } + + /// This function will run for every extern variable and function. The returned value determines + /// the link name in the bindings. + fn generated_link_name_override( + &self, + _item_info: ItemInfo<'_>, + ) -> Option { + None + } + + /// The integer kind an integer macro should have, given a name and the + /// value of that macro, or `None` if you want the default to be chosen. + fn int_macro(&self, _name: &str, _value: i64) -> Option { + None + } + + /// This will be run on every string macro. The callback cannot influence the further + /// treatment of the macro, but may use the value to generate additional code or configuration. 
+ fn str_macro(&self, _name: &str, _value: &[u8]) {} + + /// This will be run on every function-like macro. The callback cannot + /// influence the further treatment of the macro, but may use the value to + /// generate additional code or configuration. + /// + /// The first parameter represents the name and argument list (including the + /// parentheses) of the function-like macro. The second parameter represents + /// the expansion of the macro as a sequence of tokens. + fn func_macro(&self, _name: &str, _value: &[&[u8]]) {} + + /// This function should return whether, given an enum variant + /// name, and value, this enum variant will forcibly be a constant. + fn enum_variant_behavior( + &self, + _enum_name: Option<&str>, + _original_variant_name: &str, + _variant_value: EnumVariantValue, + ) -> Option { + None + } + + /// Allows to rename an enum variant, replacing `_original_variant_name`. + fn enum_variant_name( + &self, + _enum_name: Option<&str>, + _original_variant_name: &str, + _variant_value: EnumVariantValue, + ) -> Option { + None + } + + /// Allows to rename an item, replacing `_original_item_name`. + fn item_name(&self, _original_item_name: &str) -> Option { + None + } + + /// This will be called on every file inclusion, with the full path of the included file. + fn include_file(&self, _filename: &str) {} + + /// This will be called every time `bindgen` reads an environment variable whether it has any + /// content or not. + fn read_env_var(&self, _key: &str) {} + + /// This will be called to determine whether a particular blocklisted type + /// implements a trait or not. This will be used to implement traits on + /// other types containing the blocklisted type. 
+ /// + /// * `None`: use the default behavior + /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait` + /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't + /// derive `_derive_trait` but can implemented it manually + /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement `_derive_trait` + fn blocklisted_type_implements_trait( + &self, + _name: &str, + _derive_trait: DeriveTrait, + ) -> Option { + None + } + + /// Provide a list of custom derive attributes. + /// + /// If no additional attributes are wanted, this function should return an + /// empty `Vec`. + fn add_derives(&self, _info: &DeriveInfo<'_>) -> Vec { + vec![] + } + + /// Process a source code comment. + fn process_comment(&self, _comment: &str) -> Option { + None + } +} + +/// Relevant information about a type to which new derive attributes will be added using +/// [`ParseCallbacks::add_derives`]. +#[derive(Debug)] +#[non_exhaustive] +pub struct DeriveInfo<'a> { + /// The name of the type. + pub name: &'a str, + /// The kind of the type. + pub kind: TypeKind, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// The kind of the current type. +pub enum TypeKind { + /// The type is a Rust `struct`. + Struct, + /// The type is a Rust `enum`. + Enum, + /// The type is a Rust `union`. + Union, +} + +/// A struct providing information about the item being passed to [`ParseCallbacks::generated_name_override`]. +#[non_exhaustive] +pub struct ItemInfo<'a> { + /// The name of the item + pub name: &'a str, + /// The kind of item + pub kind: ItemKind, +} + +/// An enum indicating the kind of item for an ItemInfo. 
+#[non_exhaustive] +pub enum ItemKind { + /// A Function + Function, + /// A Variable + Var, +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/.cargo-checksum.json 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/.cargo-checksum.json 2023-08-15 22:24:19.000000000 +0000 @@ -1 +1 @@ -{"files":{"Cargo.lock":"a915231b52b67320c7d440eb711c99632e4b948e5dcbeb6835e18bd0d798c76a","Cargo.toml":"655f82f7efb9e6b434a4710d8b1ea5b8c2116bccc6d8a4f87a7abc9e0c69051b","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"c093205492ab9f00f275c50aacfc9058264d3dcc7c7d2ff83e0cc4858d1cee49","build.rs":"d53484feea4cd147cd80280ac270c24ab727386acabb043e6347c44ac5369d0e","csmith-fuzzing/README.md":"7107b70fedb0c0a0cadb3c439a49c1bd0119a6d38dc63b1aecc74d1942256ef2","src/callbacks.rs":"cb4ca440e356dde75919a5298b75cbf145c981c2e1da62907337706286dd5c9e","src/clang.rs":"6b02ae174012372d00b442b5ec5a66a6122a091217039e5ba4917578c769d01f","src/codegen/bitfield_unit.rs":"fddeaeab5859f4e82081865595b7705f5c0774d997df95fa5c655b81b9cae125","src/codegen/bitfield_unit_tests.rs":"9df86490de5e9d66ccea583dcb686dd440375dc1a3c3cf89a89d5de3883bf28a","src/codegen/dyngen.rs":"b1bca96fbd81b1c0678122df8d28f3b60cd74047a43d0d298d69feb06eecf459","src/codegen/error.rs":"5e308b8c54b68511fc8ea2ad15ddac510172c4ff460a80a265336440b0c9653d","src/codegen/helpers.rs":"b4e2ee991e83fda62b0aebd562b948eba785179cb4aa1a154d00ffad215b7be5","src/codegen/impl_debug.rs":"71d8e28873ba2de466f2637a824746963702f0511728037d72ee5670c51194cb","src/codegen/impl_partialeq.rs":"f4599e32c66179ae515a6518a3e94b686689cf59f7dd9ab618c3fb69f17d2c77","src/codegen/mod.rs":"a286fa9a31254ce317c4baad05af446b59aaa23fb80aba9f260e67d15c64ff8c","src/codegen/struct_layout.rs":"d03e66412f4bb1fa59c623873b2a22e100d029a002
c07aaf4586f4852a410b54","src/deps.rs":"de4a91d1d252295e1abaf4ab1f90f7be618c67649edb12081c3a501e61398a75","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"f93bb757400580a75adc6a187cdeb032ec4d6efe7d3fcb9a6864472edd875580","src/ir/analysis/derive.rs":"066d35cdb7523c5edd141394286911128261b4db23cc17520e3b3111ef1bb51e","src/ir/analysis/has_destructor.rs":"7a82f01e7e0595a31b56f7c398fa3019b3fe9100a2a73b56768f7e6943dcc3ce","src/ir/analysis/has_float.rs":"58ea1e38a59ef208400fd65d426cb5b288949df2d383b3a194fa01b99d2a87fa","src/ir/analysis/has_type_param_in_array.rs":"d1b9eb119dc14f662eb9bd1394c859f485479e4912589709cdd33f6752094e22","src/ir/analysis/has_vtable.rs":"368cf30fbe3fab7190fab48718b948caac5da8c9e797b709488716b919315636","src/ir/analysis/mod.rs":"cde4ce0667d1895008c9b2af479211c828740fcb59fa13d600cbdc100fa8bdc5","src/ir/analysis/sizedness.rs":"944443d6aab35d2dd80e4f5e59176ac1e1c463ba2f0eb25d33f1d95dfac1a6d0","src/ir/analysis/template_params.rs":"a2d2e247c2f51cd90e83f11bce0305c2e498232d015f88192b44e8522e7fd8b1","src/ir/annotations.rs":"456276ef7f9b04e40b7b10aa7570d98b11aae8efe676679881459ae878bbecfc","src/ir/comment.rs":"9c0c4789c0893b636fac42228f8a0292a06cb4f2b7431895490784dd16b7f79a","src/ir/comp.rs":"811a2abfbf8ed6925327ad005a460ca698d40a2d5d4698015e1bcd4e7d2c9cf0","src/ir/context.rs":"df486590515ffaab8b51c96699a239de202569a8718d9c4b79a8ccc8808cee69","src/ir/derive.rs":"e5581852eec87918901a129284b4965aefc8a19394187a8095779a084f28fabe","src/ir/dot.rs":"2d79d698e6ac59ce032840e62ff11103abed1d5e9e700cf383b492333eeebe1f","src/ir/enum_ty.rs":"c2d928bb1a8453922c962cb11a7ab3b737c5651599141ece8d31e21e6eb74585","src/ir/function.rs":"3e13078b36ee02142017cfbbaaeb9e64ef485a12e151096e12f54a8fde984505","src/ir/int.rs":"68a86182743ec338d58e42203364dc7c8970cb7ec3550433ca92f0c9489b4442","src/ir/item.rs":"1c79d6dd400ab01545a19214847245b440690bfe129895f164bef460ee41b857","src/ir/item_kind.rs":"7666a1ff1b8260978b790a08b4139ab56
b5c65714a5652bbcec7faa7443adc36","src/ir/layout.rs":"d6bd9a14b94320f9e2517bf9fc9ffaf4220954fa24d77d90bba070dbede7392b","src/ir/mod.rs":"713cd537434567003197a123cbae679602c715e976d22f7b23dafd0826ea4c70","src/ir/module.rs":"7cae5561bcf84a5c3b1ee8f1c3336a33b7f44f0d5ffe885fb108289983fe763e","src/ir/objc.rs":"dd394c1db6546cbe5111ce5cd2f211f9839aba81c5e7228c2a68fba386bc259f","src/ir/template.rs":"3bb3e7f6ec28eff73c2032922d30b93d70da117b848e9cb02bdf6c9a74294f7f","src/ir/traversal.rs":"105d93bc2f1f55033c621667a0e55153844eec34560ae88183f799d0d0c1a6f2","src/ir/ty.rs":"2ecae57f018732b6daf1c08fc98765456a9e6a24cbceaf7f1bc004676b0113ee","src/ir/var.rs":"fe7720438af43fa3bbe3850aff331bb47131b2c21e975f92bfbcdc182789105a","src/lib.rs":"0f148aef6fd6ae814df29317fe5860d0c1747c40d5182f2518d3b81a03b6587a","src/log_stubs.rs":"9f974e041e35c8c7e29985d27ae5cd0858d68f8676d1dc005c6388d7d011707f","src/main.rs":"188cd89581490eb5f26a194cc25e4f38f3e0b93eed7ad591bc73362febd26b72","src/options.rs":"14190fae2aaad331f0660e4cc1d5a1fea0c2c88696091715867a3e7282a1d7b5","src/parse.rs":"4ffc54415eadb622ee488603862788c78361ef2c889de25259441a340c2a010f","src/regex_set.rs":"6c46357fb1ee68250e5e017cbf691f340041489ae78599eee7a5665a6ddce27f","src/time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8"} \ No newline at end of file 
+{"files":{"Cargo.toml":"1c290771bddd3cde261935e253cd7574b648d1b321a0f3466d429eca3a3cce64","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","build.rs":"4a9c4ac3759572e17de312a9d3f4ced3b6fd3c71811729e5a8d06bfbd1ac8f82","callbacks.rs":"985f5e3b19b870ec90baa89187b5049514fc5a259bc74fd6fb2ee857c52c11ff","clang.rs":"ee5130a029688f0eadc854c9873824330b6539e2eae597e2198b51e4e8f124a5","codegen/bitfield_unit.rs":"fddeaeab5859f4e82081865595b7705f5c0774d997df95fa5c655b81b9cae125","codegen/bitfield_unit_tests.rs":"9df86490de5e9d66ccea583dcb686dd440375dc1a3c3cf89a89d5de3883bf28a","codegen/dyngen.rs":"6d8bed53c6de66bc658b3186041c2b75549f49b0f0363ff18b87c8dcf2f5a05b","codegen/error.rs":"fa02274debd9064f35a627c43407e4e47ca89f2becfb1c233a500440d6c73e00","codegen/helpers.rs":"cf9e60d18d17d624f3559b6dd65e75630a16e6c1b71666f7c9656e51053d10f8","codegen/impl_debug.rs":"80df6136327b1ca8c7d1c2961290b5ab00b85b49b22c02f26a590bc68fb230af","codegen/impl_partialeq.rs":"db739d7ba6f5ba4033d6bf62c276f35217c20eab27230cf07dadf59e8b2f71bb","codegen/mod.rs":"89156a1926556d7c46b0266aabbb7c4e5a4a93fe1e5fc088f86acd3b14203f17","codegen/postprocessing/merge_extern_blocks.rs":"284457a3c75e945217bab4e5a4280fef0fcc03c31e12cc5010aab87f34c0b6c7","codegen/postprocessing/mod.rs":"160a6d6701cabf2514e23570df1bd1b648c909cc27b7c583f21d98fe0c16722e","codegen/postprocessing/sort_semantically.rs":"f465d1e8cc119082eb79c164b5cd780a370821e8bf56585b287dd3b51fc4a542","codegen/serialize.rs":"bb99633ab6a6764b84dac86a873fa64c90aa4979f26e75fbeff9af365b3fefa8","codegen/struct_layout.rs":"5685fc6caa24ac2779fbb885064043898830c00c92819e8c0e4fd9564c641c4d","deps.rs":"5ee2332fdb10325f3b0a0c6d9ba94e13eb631ef39e955fa958afc3625bdb5448","diagnostics.rs":"dc40cd5e9710922422c5c9420e2351f5d976e7a1d7275e4f4ce742cad9eb53f8","extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","features.rs":"6c17e37bdd14355c9c3f93b67e539bf001ea113a9efe287527e9021d785b5bda","ir/analysis/derive.rs":"cb
a290e9c4ba271e90524149ad3b874f37843bfdfab12d513cc85d2665447fd5","ir/analysis/has_destructor.rs":"e7e95c3b0989b6375cd3eabaac85a36ecc2915a1fd3700c7d26fe04e8dc83ba3","ir/analysis/has_float.rs":"a56b97bf913f132c2c63dc202b45c692c416a8c9fdc6b2baeee30362fb0d4405","ir/analysis/has_type_param_in_array.rs":"788ebb4ba2cf46a22f1e4ff3005d51f38d414b72e95355f7ff4125521e2d9525","ir/analysis/has_vtable.rs":"83efa40ae89147170eabdff1387e60aba574ca4cd4cdef22692753594f09d6c6","ir/analysis/mod.rs":"ed161d9f60306ad42af2ae70ff0eb686a36e2fb30eb94918b5e5f19af80e1db7","ir/analysis/sizedness.rs":"f0a9302f3c6ad694d76cfab11dbaf5392ecaf7f04bc7b211a5a003776b963896","ir/analysis/template_params.rs":"8f73a640cdd3b8e4e05fd5818eec8d36ba240ff131e8b785da3270c1335827a1","ir/annotations.rs":"eaacb6508b02d7d494bcaa50b9ba7acbe15f90f22130d3a24e2573909c08776f","ir/comment.rs":"4c9c20b5a3da086211e92adec0822831dbc0b7ebee98fee313edcae9ae8d55ec","ir/comp.rs":"fb32715ed8fc14bee51c344a41c1f7a8a802d4a6dceb2775034ea33a88670df7","ir/context.rs":"8b9f502e85ed563b46fc11eacb2e2140c19e7527dce4e31831cc9a571fbf87ff","ir/derive.rs":"c21e470bb0091f20bfa366110880d48984fc3cf7071fdf36eccfa64f3eca231c","ir/dot.rs":"75bdfd83d9e754ba726f6a5529ba1d9ff46f5bf49bf237452985eb008fed0854","ir/enum_ty.rs":"f4bfa6d18ba4977fb66f5d5e4a7674eded93b761404d91cdd6fdd50029db455a","ir/function.rs":"4cb04fbf40e8e8d4128c6182c84f21026b99446daf29ccba0871bedb275a5f81","ir/int.rs":"601736f0ad0949e40684a9ce89bafbfefa71743df6ee6c342e44888a0f141ae0","ir/item.rs":"5c0d0d2a7a327ac0c6ba1aadcef710b6d399c24bee3fbbd1ab6386e871c44033","ir/item_kind.rs":"33e21104b0bb824a696a52cd520567ae56158010a1df14777e68ac5f8ad7e8fa","ir/layout.rs":"e704c9c8cd1532f9890a1c6b43e3b6e691565b6acc2a9ce07486a4674497f511","ir/mod.rs":"a3b98b1732111a980a795c72eaf1e09101e842ef2de76b4f2d4a7857f8d4cee4","ir/module.rs":"f82f380274e9adbab8017bc5e484a23d945e2cb7a97ce17c9cd2a2cfc505bb54","ir/objc.rs":"0f55ff60db706241634ed8396108ec84ecbec80e0cf28f68ab580c868e0e0cb4","ir/template.rs":"3f59efa9670c
a90215d4374be869c9dbecb98a8d1041e7c6e4ab69a62bb982c2","ir/traversal.rs":"a4ec73d3533d4b93386153baf6a2ca846ee51228c76ed51105229d3ddcd74466","ir/ty.rs":"7e479d601229619cf39073fc3570f4211666cc042a60ab27c810bdde0e5d5690","ir/var.rs":"40d18226706de0ee5f002d0b5617dbcba35de0605edd531c75e3a76d000f0f4f","lib.rs":"ef2927a0a84d50b6bea44d9e95f69d2dc9fc7bc75aff8fc3a5edd2919613a81c","log_stubs.rs":"9f974e041e35c8c7e29985d27ae5cd0858d68f8676d1dc005c6388d7d011707f","options/as_args.rs":"3b3547e08f0cb72fa042cde417bbc8760166d11dc0db4812e7a280c93074d2f5","options/helpers.rs":"f4a7681e29b2dcc3be9249478c499d685b9e29d4f4ca4ae8bff7a91668cd8f15","options/mod.rs":"f06194a21bf5b4a7039d1be80e5b0b3e4a310f48084a6e2b7abbb1539d0c2004","parse.rs":"fce3616e0464aa7414888e5d00d4df18c83bb3034a1c807d36a07a3c586e475a","regex_set.rs":"8b38dce6b4b34712f7eafcb2817024de18fccf0cead0c175de34f78ea4027545","time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.lock clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.lock --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.lock 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.lock 1970-01-01 00:00:00.000000000 +0000 @@ -1,446 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "bindgen" -version = "0.59.2" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "clap", - "diff", - "env_logger", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "tempfile", - "which", -] - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clang-sys" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.33.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" -dependencies = [ - "ansi_term", - "atty", - "bitflags", - "strsim", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "diff" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "env_logger" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "getrandom" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" - -[[package]] -name = "libloading" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" -dependencies = [ - "cfg-if", - "winapi", -] - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" - -[[package]] -name = "minimal-lexical" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c64630dcdd71f1a64c435f54885086a0de5d6a12d104d69b165fb7d5286d677" - -[[package]] -name = "nom" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffd9d26838a953b4af82cbeb9f1592c6798916983959be223a7124e992742c1" -dependencies = [ - "memchr", - "minimal-lexical", - "version_check", -] - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "proc-macro2" -version = "1.0.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", - "rand_hc", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core", -] - -[[package]] -name = "redox_syscall" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" -dependencies = [ - "aho-corasick", 
- "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "shlex" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "tempfile" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" -dependencies = [ - "cfg-if", - "libc", - "rand", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "unicode-width" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "which" -version = "4.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc009ab82a2afc94b9e467ab4214aee9cad1356cd9191264203d7d72006e00d" -dependencies = [ - "either", - "lazy_static", - "libc", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.toml 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/Cargo.toml 2023-08-15 22:24:19.000000000 +0000 @@ -11,28 +11,41 @@ [package] edition = "2018" +rust-version = "1.60.0" name = "bindgen" -version = "0.59.2" -authors = ["Jyun-Yan You ", "Emilio Cobos Álvarez ", "Nick Fitzgerald ", "The Servo project developers"] +version = "0.65.1" +authors = [ + "Jyun-Yan You ", + "Emilio Cobos Álvarez ", + "Nick Fitzgerald ", + "The Servo project developers", +] build = "build.rs" -include = ["LICENSE", "README.md", "Cargo.toml", "build.rs", "src/*.rs", "src/**/*.rs"] description = "Automatically generates Rust FFI bindings to C and C++ libraries." 
homepage = "https://rust-lang.github.io/rust-bindgen/" documentation = "https://docs.rs/bindgen" -readme = "README.md" -keywords = ["bindings", "ffi", "code-generation"] -categories = ["external-ffi-bindings", "development-tools::ffi"] +readme = "../README.md" +keywords = [ + "bindings", + "ffi", + "code-generation", +] +categories = [ + "external-ffi-bindings", + "development-tools::ffi", +] license = "BSD-3-Clause" repository = "https://github.com/rust-lang/rust-bindgen" [lib] -path = "src/lib.rs" - -[[bin]] name = "bindgen" -path = "src/main.rs" -doc = false -required-features = ["clap"] +path = "lib.rs" + +[dependencies.annotate-snippets] +version = "0.9.1" +features = ["color"] +optional = true + [dependencies.bitflags] version = "1.0.3" @@ -43,14 +56,6 @@ version = "1" features = ["clang_6_0"] -[dependencies.clap] -version = "2" -optional = true - -[dependencies.env_logger] -version = "0.9.0" -optional = true - [dependencies.lazy_static] version = "1" @@ -64,6 +69,9 @@ [dependencies.peeking_take_while] version = "0.1.2" +[dependencies.prettyplease] +version = "0.2.0" + [dependencies.proc-macro2] version = "1" default-features = false @@ -73,8 +81,11 @@ default-features = false [dependencies.regex] -version = "1.0" -features = ["std", "unicode"] +version = "1.5" +features = [ + "std", + "unicode", +] default-features = false [dependencies.rustc-hash] @@ -83,33 +94,31 @@ [dependencies.shlex] version = "1" +[dependencies.syn] +version = "2.0" +features = [ + "full", + "extra-traits", + "visit-mut", +] + [dependencies.which] version = "4.2.1" optional = true default-features = false -[dev-dependencies.clap] -version = "2" - -[dev-dependencies.diff] -version = "0.1" - -[dev-dependencies.shlex] -version = "1" - -[dev-dependencies.tempfile] -version = "3" [features] -default = ["logging", "clap", "runtime", "which-rustfmt"] -logging = ["env_logger", "log"] +__cli = [] +default = [ + "logging", + "runtime", + "which-rustfmt", +] +experimental = ["annotate-snippets"] 
+logging = ["log"] runtime = ["clang-sys/runtime"] static = ["clang-sys/static"] -testing_only_docs = [] testing_only_extra_assertions = [] -testing_only_libclang_3_9 = [] -testing_only_libclang_4 = [] testing_only_libclang_5 = [] testing_only_libclang_9 = [] which-rustfmt = ["which"] -[badges.travis-ci] -repository = "rust-lang/rust-bindgen" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/clang.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/clang.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/clang.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/clang.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,2236 @@ +//! A higher level Clang API built on top of the generated bindings in the +//! `clang_sys` module. + +#![allow(non_upper_case_globals, dead_code)] +#![deny(clippy::missing_docs_in_private_items)] + +use crate::ir::context::BindgenContext; +use clang_sys::*; +use std::ffi::{CStr, CString}; +use std::fmt; +use std::hash::Hash; +use std::hash::Hasher; +use std::os::raw::{c_char, c_int, c_longlong, c_uint, c_ulong, c_ulonglong}; +use std::{mem, ptr, slice}; + +/// Type representing a clang attribute. +/// +/// Values of this type can be used to check for different attributes using the `has_attrs` +/// function. +pub(crate) struct Attribute { + name: &'static [u8], + kind: Option, + token_kind: CXTokenKind, +} + +impl Attribute { + /// A `warn_unused_result` attribute. + pub(crate) const MUST_USE: Self = Self { + name: b"warn_unused_result", + // FIXME(emilio): clang-sys doesn't expose `CXCursor_WarnUnusedResultAttr` (from clang 9). + kind: Some(440), + token_kind: CXToken_Identifier, + }; + + /// A `_Noreturn` attribute. + pub(crate) const NO_RETURN: Self = Self { + name: b"_Noreturn", + kind: None, + token_kind: CXToken_Keyword, + }; + + /// A `[[noreturn]]` attribute. 
+ pub(crate) const NO_RETURN_CPP: Self = Self { + name: b"noreturn", + kind: None, + token_kind: CXToken_Identifier, + }; +} + +/// A cursor into the Clang AST, pointing to an AST node. +/// +/// We call the AST node pointed to by the cursor the cursor's "referent". +#[derive(Copy, Clone)] +pub(crate) struct Cursor { + x: CXCursor, +} + +impl fmt::Debug for Cursor { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "Cursor({} kind: {}, loc: {}, usr: {:?})", + self.spelling(), + kind_to_str(self.kind()), + self.location(), + self.usr() + ) + } +} + +impl Cursor { + /// Get the Unified Symbol Resolution for this cursor's referent, if + /// available. + /// + /// The USR can be used to compare entities across translation units. + pub(crate) fn usr(&self) -> Option { + let s = unsafe { cxstring_into_string(clang_getCursorUSR(self.x)) }; + if s.is_empty() { + None + } else { + Some(s) + } + } + + /// Is this cursor's referent a declaration? + pub(crate) fn is_declaration(&self) -> bool { + unsafe { clang_isDeclaration(self.kind()) != 0 } + } + + /// Is this cursor's referent an anonymous record or so? + pub(crate) fn is_anonymous(&self) -> bool { + unsafe { clang_Cursor_isAnonymous(self.x) != 0 } + } + + /// Get this cursor's referent's spelling. + pub(crate) fn spelling(&self) -> String { + unsafe { cxstring_into_string(clang_getCursorSpelling(self.x)) } + } + + /// Get this cursor's referent's display name. + /// + /// This is not necessarily a valid identifier. It includes extra + /// information, such as parameters for a function, etc. + pub(crate) fn display_name(&self) -> String { + unsafe { cxstring_into_string(clang_getCursorDisplayName(self.x)) } + } + + /// Get the mangled name of this cursor's referent. + pub(crate) fn mangling(&self) -> String { + unsafe { cxstring_into_string(clang_Cursor_getMangling(self.x)) } + } + + /// Gets the C++ manglings for this cursor, or an error if the manglings + /// are not available. 
+ pub(crate) fn cxx_manglings(&self) -> Result, ()> { + use clang_sys::*; + unsafe { + let manglings = clang_Cursor_getCXXManglings(self.x); + if manglings.is_null() { + return Err(()); + } + let count = (*manglings).Count as usize; + + let mut result = Vec::with_capacity(count); + for i in 0..count { + let string_ptr = (*manglings).Strings.add(i); + result.push(cxstring_to_string_leaky(*string_ptr)); + } + clang_disposeStringSet(manglings); + Ok(result) + } + } + + /// Returns whether the cursor refers to a built-in definition. + pub(crate) fn is_builtin(&self) -> bool { + let (file, _, _, _) = self.location().location(); + file.name().is_none() + } + + /// Get the `Cursor` for this cursor's referent's lexical parent. + /// + /// The lexical parent is the parent of the definition. The semantic parent + /// is the parent of the declaration. Generally, the lexical parent doesn't + /// have any effect on semantics, while the semantic parent does. + /// + /// In the following snippet, the `Foo` class would be the semantic parent + /// of the out-of-line `method` definition, while the lexical parent is the + /// translation unit. + /// + /// ```c++ + /// class Foo { + /// void method(); + /// }; + /// + /// void Foo::method() { /* ... */ } + /// ``` + pub(crate) fn lexical_parent(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getCursorLexicalParent(self.x), + } + } + } + + /// Get the referent's semantic parent, if one is available. + /// + /// See documentation for `lexical_parent` for details on semantic vs + /// lexical parents. + pub(crate) fn fallible_semantic_parent(&self) -> Option { + let sp = unsafe { + Cursor { + x: clang_getCursorSemanticParent(self.x), + } + }; + if sp == *self || !sp.is_valid() { + return None; + } + Some(sp) + } + + /// Get the referent's semantic parent. + /// + /// See documentation for `lexical_parent` for details on semantic vs + /// lexical parents. 
+ pub(crate) fn semantic_parent(&self) -> Cursor { + self.fallible_semantic_parent().unwrap() + } + + /// Return the number of template arguments used by this cursor's referent, + /// if the referent is either a template instantiation. Returns `None` + /// otherwise. + /// + /// NOTE: This may not return `Some` for partial template specializations, + /// see #193 and #194. + pub(crate) fn num_template_args(&self) -> Option { + // XXX: `clang_Type_getNumTemplateArguments` is sort of reliable, while + // `clang_Cursor_getNumTemplateArguments` is totally unreliable. + // Therefore, try former first, and only fallback to the latter if we + // have to. + self.cur_type() + .num_template_args() + .or_else(|| { + let n: c_int = + unsafe { clang_Cursor_getNumTemplateArguments(self.x) }; + + if n >= 0 { + Some(n as u32) + } else { + debug_assert_eq!(n, -1); + None + } + }) + .or_else(|| { + let canonical = self.canonical(); + if canonical != *self { + canonical.num_template_args() + } else { + None + } + }) + } + + /// Get a cursor pointing to this referent's containing translation unit. + /// + /// Note that we shouldn't create a `TranslationUnit` struct here, because + /// bindgen assumes there will only be one of them alive at a time, and + /// disposes it on drop. That can change if this would be required, but I + /// think we can survive fine without it. + pub(crate) fn translation_unit(&self) -> Cursor { + assert!(self.is_valid()); + unsafe { + let tu = clang_Cursor_getTranslationUnit(self.x); + let cursor = Cursor { + x: clang_getTranslationUnitCursor(tu), + }; + assert!(cursor.is_valid()); + cursor + } + } + + /// Is the referent a top level construct? 
+ pub(crate) fn is_toplevel(&self) -> bool { + let mut semantic_parent = self.fallible_semantic_parent(); + + while semantic_parent.is_some() && + (semantic_parent.unwrap().kind() == CXCursor_Namespace || + semantic_parent.unwrap().kind() == + CXCursor_NamespaceAlias || + semantic_parent.unwrap().kind() == CXCursor_NamespaceRef) + { + semantic_parent = + semantic_parent.unwrap().fallible_semantic_parent(); + } + + let tu = self.translation_unit(); + // Yes, this can happen with, e.g., macro definitions. + semantic_parent == tu.fallible_semantic_parent() + } + + /// There are a few kinds of types that we need to treat specially, mainly + /// not tracking the type declaration but the location of the cursor, given + /// clang doesn't expose a proper declaration for these types. + pub(crate) fn is_template_like(&self) -> bool { + matches!( + self.kind(), + CXCursor_ClassTemplate | + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_TypeAliasTemplateDecl + ) + } + + /// Is this Cursor pointing to a function-like macro definition? + pub(crate) fn is_macro_function_like(&self) -> bool { + unsafe { clang_Cursor_isMacroFunctionLike(self.x) != 0 } + } + + /// Get the kind of referent this cursor is pointing to. + pub(crate) fn kind(&self) -> CXCursorKind { + self.x.kind + } + + /// Returns true if the cursor is a definition + pub(crate) fn is_definition(&self) -> bool { + unsafe { clang_isCursorDefinition(self.x) != 0 } + } + + /// Is the referent a template specialization? + pub(crate) fn is_template_specialization(&self) -> bool { + self.specialized().is_some() + } + + /// Is the referent a fully specialized template specialization without any + /// remaining free template arguments? 
+ pub(crate) fn is_fully_specialized_template(&self) -> bool { + self.is_template_specialization() && + self.kind() != CXCursor_ClassTemplatePartialSpecialization && + self.num_template_args().unwrap_or(0) > 0 + } + + /// Is the referent a template specialization that still has remaining free + /// template arguments? + pub(crate) fn is_in_non_fully_specialized_template(&self) -> bool { + if self.is_toplevel() { + return false; + } + + let parent = self.semantic_parent(); + if parent.is_fully_specialized_template() { + return false; + } + + if !parent.is_template_like() { + return parent.is_in_non_fully_specialized_template(); + } + + true + } + + /// Is the referent any kind of template parameter? + pub(crate) fn is_template_parameter(&self) -> bool { + matches!( + self.kind(), + CXCursor_TemplateTemplateParameter | + CXCursor_TemplateTypeParameter | + CXCursor_NonTypeTemplateParameter + ) + } + + /// Does the referent's type or value depend on a template parameter? + pub(crate) fn is_dependent_on_template_parameter(&self) -> bool { + fn visitor( + found_template_parameter: &mut bool, + cur: Cursor, + ) -> CXChildVisitResult { + // If we found a template parameter, it is dependent. + if cur.is_template_parameter() { + *found_template_parameter = true; + return CXChildVisit_Break; + } + + // Get the referent and traverse it as well. + if let Some(referenced) = cur.referenced() { + if referenced.is_template_parameter() { + *found_template_parameter = true; + return CXChildVisit_Break; + } + + referenced + .visit(|next| visitor(found_template_parameter, next)); + if *found_template_parameter { + return CXChildVisit_Break; + } + } + + // Continue traversing the AST at the original cursor. + CXChildVisit_Recurse + } + + if self.is_template_parameter() { + return true; + } + + let mut found_template_parameter = false; + self.visit(|next| visitor(&mut found_template_parameter, next)); + + found_template_parameter + } + + /// Is this cursor pointing a valid referent? 
+ pub(crate) fn is_valid(&self) -> bool { + unsafe { clang_isInvalid(self.kind()) == 0 } + } + + /// Get the source location for the referent. + pub(crate) fn location(&self) -> SourceLocation { + unsafe { + SourceLocation { + x: clang_getCursorLocation(self.x), + } + } + } + + /// Get the source location range for the referent. + pub(crate) fn extent(&self) -> CXSourceRange { + unsafe { clang_getCursorExtent(self.x) } + } + + /// Get the raw declaration comment for this referent, if one exists. + pub(crate) fn raw_comment(&self) -> Option { + let s = unsafe { + cxstring_into_string(clang_Cursor_getRawCommentText(self.x)) + }; + if s.is_empty() { + None + } else { + Some(s) + } + } + + /// Get the referent's parsed comment. + pub(crate) fn comment(&self) -> Comment { + unsafe { + Comment { + x: clang_Cursor_getParsedComment(self.x), + } + } + } + + /// Get the referent's type. + pub(crate) fn cur_type(&self) -> Type { + unsafe { + Type { + x: clang_getCursorType(self.x), + } + } + } + + /// Given that this cursor's referent is a reference to another type, or is + /// a declaration, get the cursor pointing to the referenced type or type of + /// the declared thing. + pub(crate) fn definition(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getCursorDefinition(self.x), + }; + + if ret.is_valid() && ret.kind() != CXCursor_NoDeclFound { + Some(ret) + } else { + None + } + } + } + + /// Given that this cursor's referent is reference type, get the cursor + /// pointing to the referenced type. + pub(crate) fn referenced(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getCursorReferenced(self.x), + }; + + if ret.is_valid() { + Some(ret) + } else { + None + } + } + } + + /// Get the canonical cursor for this referent. + /// + /// Many types can be declared multiple times before finally being properly + /// defined. This method allows us to get the canonical cursor for the + /// referent type. 
+ pub(crate) fn canonical(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getCanonicalCursor(self.x), + } + } + } + + /// Given that this cursor points to either a template specialization or a + /// template instantiation, get a cursor pointing to the template definition + /// that is being specialized. + pub(crate) fn specialized(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getSpecializedCursorTemplate(self.x), + }; + if ret.is_valid() { + Some(ret) + } else { + None + } + } + } + + /// Assuming that this cursor's referent is a template declaration, get the + /// kind of cursor that would be generated for its specializations. + pub(crate) fn template_kind(&self) -> CXCursorKind { + unsafe { clang_getTemplateCursorKind(self.x) } + } + + /// Traverse this cursor's referent and its children. + /// + /// Call the given function on each AST node traversed. + pub(crate) fn visit(&self, mut visitor: Visitor) + where + Visitor: FnMut(Cursor) -> CXChildVisitResult, + { + let data = &mut visitor as *mut Visitor; + unsafe { + clang_visitChildren(self.x, visit_children::, data.cast()); + } + } + + /// Collect all of this cursor's children into a vec and return them. + pub(crate) fn collect_children(&self) -> Vec { + let mut children = vec![]; + self.visit(|c| { + children.push(c); + CXChildVisit_Continue + }); + children + } + + /// Does this cursor have any children? + pub(crate) fn has_children(&self) -> bool { + let mut has_children = false; + self.visit(|_| { + has_children = true; + CXChildVisit_Break + }); + has_children + } + + /// Does this cursor have at least `n` children? 
+ pub(crate) fn has_at_least_num_children(&self, n: usize) -> bool { + assert!(n > 0); + let mut num_left = n; + self.visit(|_| { + num_left -= 1; + if num_left == 0 { + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + num_left == 0 + } + + /// Returns whether the given location contains a cursor with the given + /// kind in the first level of nesting underneath (doesn't look + /// recursively). + pub(crate) fn contains_cursor(&self, kind: CXCursorKind) -> bool { + let mut found = false; + + self.visit(|c| { + if c.kind() == kind { + found = true; + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + + found + } + + /// Is the referent an inlined function? + pub(crate) fn is_inlined_function(&self) -> bool { + unsafe { clang_Cursor_isFunctionInlined(self.x) != 0 } + } + + /// Is the referent a defaulted function? + pub(crate) fn is_defaulted_function(&self) -> bool { + unsafe { clang_CXXMethod_isDefaulted(self.x) != 0 } + } + + /// Is the referent a deleted function? + pub(crate) fn is_deleted_function(&self) -> bool { + // Unfortunately, libclang doesn't yet have an API for checking if a + // member function is deleted, but the following should be a good + // enough approximation. + // Deleted functions are implicitly inline according to paragraph 4 of + // [dcl.fct.def.delete] in the C++ standard. Normal inline functions + // have a definition in the same translation unit, so if this is an + // inline function without a definition, and it's not a defaulted + // function, we can reasonably safely conclude that it's a deleted + // function. + self.is_inlined_function() && + self.definition().is_none() && + !self.is_defaulted_function() + } + + /// Is the referent a bit field declaration? + pub(crate) fn is_bit_field(&self) -> bool { + unsafe { clang_Cursor_isBitField(self.x) != 0 } + } + + /// Get a cursor to the bit field's width expression, or `None` if it's not + /// a bit field. 
+ pub(crate) fn bit_width_expr(&self) -> Option { + if !self.is_bit_field() { + return None; + } + + let mut result = None; + self.visit(|cur| { + // The first child may or may not be a TypeRef, depending on whether + // the field's type is builtin. Skip it. + if cur.kind() == CXCursor_TypeRef { + return CXChildVisit_Continue; + } + + // The next expression or literal is the bit width. + result = Some(cur); + + CXChildVisit_Break + }); + + result + } + + /// Get the width of this cursor's referent bit field, or `None` if the + /// referent is not a bit field or if the width could not be evaluated. + pub(crate) fn bit_width(&self) -> Option { + // It is not safe to check the bit width without ensuring it doesn't + // depend on a template parameter. See + // https://github.com/rust-lang/rust-bindgen/issues/2239 + if self.bit_width_expr()?.is_dependent_on_template_parameter() { + return None; + } + + unsafe { + let w = clang_getFieldDeclBitWidth(self.x); + if w == -1 { + None + } else { + Some(w as u32) + } + } + } + + /// Get the integer representation type used to hold this cursor's referent + /// enum type. + pub(crate) fn enum_type(&self) -> Option { + unsafe { + let t = Type { + x: clang_getEnumDeclIntegerType(self.x), + }; + if t.is_valid() { + Some(t) + } else { + None + } + } + } + + /// Get the boolean constant value for this cursor's enum variant referent. + /// + /// Returns None if the cursor's referent is not an enum variant. + pub(crate) fn enum_val_boolean(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + Some(clang_getEnumConstantDeclValue(self.x) != 0) + } else { + None + } + } + } + + /// Get the signed constant value for this cursor's enum variant referent. + /// + /// Returns None if the cursor's referent is not an enum variant. 
+ pub(crate) fn enum_val_signed(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + #[allow(clippy::unnecessary_cast)] + Some(clang_getEnumConstantDeclValue(self.x) as i64) + } else { + None + } + } + } + + /// Get the unsigned constant value for this cursor's enum variant referent. + /// + /// Returns None if the cursor's referent is not an enum variant. + pub(crate) fn enum_val_unsigned(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + #[allow(clippy::unnecessary_cast)] + Some(clang_getEnumConstantDeclUnsignedValue(self.x) as u64) + } else { + None + } + } + } + + /// Does this cursor have the given attributes? + pub(crate) fn has_attrs( + &self, + attrs: &[Attribute; N], + ) -> [bool; N] { + let mut found_attrs = [false; N]; + let mut found_count = 0; + + self.visit(|cur| { + let kind = cur.kind(); + for (idx, attr) in attrs.iter().enumerate() { + let found_attr = &mut found_attrs[idx]; + if !*found_attr { + // `attr.name` and` attr.token_kind` are checked against unexposed attributes only. + if attr.kind.map_or(false, |k| k == kind) || + (kind == CXCursor_UnexposedAttr && + cur.tokens().iter().any(|t| { + t.kind == attr.token_kind && + t.spelling() == attr.name + })) + { + *found_attr = true; + found_count += 1; + + if found_count == N { + return CXChildVisit_Break; + } + } + } + } + + CXChildVisit_Continue + }); + + found_attrs + } + + /// Given that this cursor's referent is a `typedef`, get the `Type` that is + /// being aliased. + pub(crate) fn typedef_type(&self) -> Option { + let inner = Type { + x: unsafe { clang_getTypedefDeclUnderlyingType(self.x) }, + }; + + if inner.is_valid() { + Some(inner) + } else { + None + } + } + + /// Get the linkage kind for this cursor's referent. + /// + /// This only applies to functions and variables. + pub(crate) fn linkage(&self) -> CXLinkageKind { + unsafe { clang_getCursorLinkage(self.x) } + } + + /// Get the visibility of this cursor's referent. 
+ pub(crate) fn visibility(&self) -> CXVisibilityKind { + unsafe { clang_getCursorVisibility(self.x) } + } + + /// Given that this cursor's referent is a function, return cursors to its + /// parameters. + /// + /// Returns None if the cursor's referent is not a function/method call or + /// declaration. + pub(crate) fn args(&self) -> Option> { + // match self.kind() { + // CXCursor_FunctionDecl | + // CXCursor_CXXMethod => { + self.num_args().ok().map(|num| { + (0..num) + .map(|i| Cursor { + x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) }, + }) + .collect() + }) + } + + /// Given that this cursor's referent is a function/method call or + /// declaration, return the number of arguments it takes. + /// + /// Returns Err if the cursor's referent is not a function/method call or + /// declaration. + pub(crate) fn num_args(&self) -> Result { + unsafe { + let w = clang_Cursor_getNumArguments(self.x); + if w == -1 { + Err(()) + } else { + Ok(w as u32) + } + } + } + + /// Get the access specifier for this cursor's referent. + pub(crate) fn access_specifier(&self) -> CX_CXXAccessSpecifier { + unsafe { clang_getCXXAccessSpecifier(self.x) } + } + + /// Is the cursor's referrent publically accessible in C++? + /// + /// Returns true if self.access_specifier() is `CX_CXXPublic` or + /// `CX_CXXInvalidAccessSpecifier`. + pub(crate) fn public_accessible(&self) -> bool { + let access = self.access_specifier(); + access == CX_CXXPublic || access == CX_CXXInvalidAccessSpecifier + } + + /// Is this cursor's referent a field declaration that is marked as + /// `mutable`? + pub(crate) fn is_mutable_field(&self) -> bool { + unsafe { clang_CXXField_isMutable(self.x) != 0 } + } + + /// Get the offset of the field represented by the Cursor. 
+ pub(crate) fn offset_of_field(&self) -> Result { + let offset = unsafe { clang_Cursor_getOffsetOfField(self.x) }; + + if offset < 0 { + Err(LayoutError::from(offset as i32)) + } else { + Ok(offset as usize) + } + } + + /// Is this cursor's referent a member function that is declared `static`? + pub(crate) fn method_is_static(&self) -> bool { + unsafe { clang_CXXMethod_isStatic(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is declared `const`? + pub(crate) fn method_is_const(&self) -> bool { + unsafe { clang_CXXMethod_isConst(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is virtual? + pub(crate) fn method_is_virtual(&self) -> bool { + unsafe { clang_CXXMethod_isVirtual(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is pure virtual? + pub(crate) fn method_is_pure_virtual(&self) -> bool { + unsafe { clang_CXXMethod_isPureVirtual(self.x) != 0 } + } + + /// Is this cursor's referent a struct or class with virtual members? + pub(crate) fn is_virtual_base(&self) -> bool { + unsafe { clang_isVirtualBase(self.x) != 0 } + } + + /// Try to evaluate this cursor. + pub(crate) fn evaluate(&self) -> Option { + EvalResult::new(*self) + } + + /// Return the result type for this cursor + pub(crate) fn ret_type(&self) -> Option { + let rt = Type { + x: unsafe { clang_getCursorResultType(self.x) }, + }; + if rt.is_valid() { + Some(rt) + } else { + None + } + } + + /// Gets the tokens that correspond to that cursor. + pub(crate) fn tokens(&self) -> RawTokens { + RawTokens::new(self) + } + + /// Gets the tokens that correspond to that cursor as `cexpr` tokens. + pub(crate) fn cexpr_tokens(self) -> Vec { + self.tokens() + .iter() + .filter_map(|token| token.as_cexpr_token()) + .collect() + } + + /// Obtain the real path name of a cursor of InclusionDirective kind. 
+ /// + /// Returns None if the cursor does not include a file, otherwise the file's full name + pub(crate) fn get_included_file_name(&self) -> Option { + let file = unsafe { clang_sys::clang_getIncludedFile(self.x) }; + if file.is_null() { + None + } else { + Some(unsafe { + cxstring_into_string(clang_sys::clang_getFileName(file)) + }) + } + } +} + +/// A struct that owns the tokenizer result from a given cursor. +pub(crate) struct RawTokens<'a> { + cursor: &'a Cursor, + tu: CXTranslationUnit, + tokens: *mut CXToken, + token_count: c_uint, +} + +impl<'a> RawTokens<'a> { + fn new(cursor: &'a Cursor) -> Self { + let mut tokens = ptr::null_mut(); + let mut token_count = 0; + let range = cursor.extent(); + let tu = unsafe { clang_Cursor_getTranslationUnit(cursor.x) }; + unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) }; + Self { + cursor, + tu, + tokens, + token_count, + } + } + + fn as_slice(&self) -> &[CXToken] { + if self.tokens.is_null() { + return &[]; + } + unsafe { slice::from_raw_parts(self.tokens, self.token_count as usize) } + } + + /// Get an iterator over these tokens. + pub(crate) fn iter(&self) -> ClangTokenIterator { + ClangTokenIterator { + tu: self.tu, + raw: self.as_slice().iter(), + } + } +} + +impl<'a> Drop for RawTokens<'a> { + fn drop(&mut self) { + if !self.tokens.is_null() { + unsafe { + clang_disposeTokens( + self.tu, + self.tokens, + self.token_count as c_uint, + ); + } + } + } +} + +/// A raw clang token, that exposes only kind, spelling, and extent. This is a +/// slightly more convenient version of `CXToken` which owns the spelling +/// string and extent. +#[derive(Debug)] +pub(crate) struct ClangToken { + spelling: CXString, + /// The extent of the token. This is the same as the relevant member from + /// `CXToken`. + pub(crate) extent: CXSourceRange, + /// The kind of the token. This is the same as the relevant member from + /// `CXToken`. 
+ pub(crate) kind: CXTokenKind, +} + +impl ClangToken { + /// Get the token spelling, without being converted to utf-8. + pub(crate) fn spelling(&self) -> &[u8] { + let c_str = unsafe { + CStr::from_ptr(clang_getCString(self.spelling) as *const _) + }; + c_str.to_bytes() + } + + /// Converts a ClangToken to a `cexpr` token if possible. + pub(crate) fn as_cexpr_token(&self) -> Option { + use cexpr::token; + + let kind = match self.kind { + CXToken_Punctuation => token::Kind::Punctuation, + CXToken_Literal => token::Kind::Literal, + CXToken_Identifier => token::Kind::Identifier, + CXToken_Keyword => token::Kind::Keyword, + // NB: cexpr is not too happy about comments inside + // expressions, so we strip them down here. + CXToken_Comment => return None, + _ => { + warn!("Found unexpected token kind: {:?}", self); + return None; + } + }; + + Some(token::Token { + kind, + raw: self.spelling().to_vec().into_boxed_slice(), + }) + } +} + +impl Drop for ClangToken { + fn drop(&mut self) { + unsafe { clang_disposeString(self.spelling) } + } +} + +/// An iterator over a set of Tokens. +pub(crate) struct ClangTokenIterator<'a> { + tu: CXTranslationUnit, + raw: slice::Iter<'a, CXToken>, +} + +impl<'a> Iterator for ClangTokenIterator<'a> { + type Item = ClangToken; + + fn next(&mut self) -> Option { + let raw = self.raw.next()?; + unsafe { + let kind = clang_getTokenKind(*raw); + let spelling = clang_getTokenSpelling(self.tu, *raw); + let extent = clang_getTokenExtent(self.tu, *raw); + Some(ClangToken { + kind, + extent, + spelling, + }) + } + } +} + +/// Checks whether the name looks like an identifier, i.e. is alphanumeric +/// (including '_') and does not start with a digit. 
+pub(crate) fn is_valid_identifier(name: &str) -> bool { + let mut chars = name.chars(); + let first_valid = chars + .next() + .map(|c| c.is_alphabetic() || c == '_') + .unwrap_or(false); + + first_valid && chars.all(|c| c.is_alphanumeric() || c == '_') +} + +extern "C" fn visit_children( + cur: CXCursor, + _parent: CXCursor, + data: CXClientData, +) -> CXChildVisitResult +where + Visitor: FnMut(Cursor) -> CXChildVisitResult, +{ + let func: &mut Visitor = unsafe { &mut *(data as *mut Visitor) }; + let child = Cursor { x: cur }; + + (*func)(child) +} + +impl PartialEq for Cursor { + fn eq(&self, other: &Cursor) -> bool { + unsafe { clang_equalCursors(self.x, other.x) == 1 } + } +} + +impl Eq for Cursor {} + +impl Hash for Cursor { + fn hash(&self, state: &mut H) { + unsafe { clang_hashCursor(self.x) }.hash(state) + } +} + +/// The type of a node in clang's AST. +#[derive(Clone, Copy)] +pub(crate) struct Type { + x: CXType, +} + +impl PartialEq for Type { + fn eq(&self, other: &Self) -> bool { + unsafe { clang_equalTypes(self.x, other.x) != 0 } + } +} + +impl Eq for Type {} + +impl fmt::Debug for Type { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "Type({}, kind: {}, cconv: {}, decl: {:?}, canon: {:?})", + self.spelling(), + type_to_str(self.kind()), + self.call_conv(), + self.declaration(), + self.declaration().canonical() + ) + } +} + +/// An error about the layout of a struct, class, or type. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub(crate) enum LayoutError { + /// Asked for the layout of an invalid type. + Invalid, + /// Asked for the layout of an incomplete type. + Incomplete, + /// Asked for the layout of a dependent type. + Dependent, + /// Asked for the layout of a type that does not have constant size. + NotConstantSize, + /// Asked for the layout of a field in a type that does not have such a + /// field. + InvalidFieldName, + /// An unknown layout error. 
+ Unknown, +} + +impl ::std::convert::From for LayoutError { + fn from(val: i32) -> Self { + use self::LayoutError::*; + + match val { + CXTypeLayoutError_Invalid => Invalid, + CXTypeLayoutError_Incomplete => Incomplete, + CXTypeLayoutError_Dependent => Dependent, + CXTypeLayoutError_NotConstantSize => NotConstantSize, + CXTypeLayoutError_InvalidFieldName => InvalidFieldName, + _ => Unknown, + } + } +} + +impl Type { + /// Get this type's kind. + pub(crate) fn kind(&self) -> CXTypeKind { + self.x.kind + } + + /// Get a cursor pointing to this type's declaration. + pub(crate) fn declaration(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getTypeDeclaration(self.x), + } + } + } + + /// Get the canonical declaration of this type, if it is available. + pub(crate) fn canonical_declaration( + &self, + location: Option<&Cursor>, + ) -> Option { + let mut declaration = self.declaration(); + if !declaration.is_valid() { + if let Some(location) = location { + let mut location = *location; + if let Some(referenced) = location.referenced() { + location = referenced; + } + if location.is_template_like() { + declaration = location; + } + } + } + + let canonical = declaration.canonical(); + if canonical.is_valid() && canonical.kind() != CXCursor_NoDeclFound { + Some(CanonicalTypeDeclaration(*self, canonical)) + } else { + None + } + } + + /// Get a raw display name for this type. + pub(crate) fn spelling(&self) -> String { + let s = unsafe { cxstring_into_string(clang_getTypeSpelling(self.x)) }; + // Clang 5.0 introduced changes in the spelling API so it returned the + // full qualified name. Let's undo that here. + if s.split("::").all(is_valid_identifier) { + if let Some(s) = s.split("::").last() { + return s.to_owned(); + } + } + + s + } + + /// Is this type const qualified? 
+ pub(crate) fn is_const(&self) -> bool { + unsafe { clang_isConstQualifiedType(self.x) != 0 } + } + + #[inline] + fn is_non_deductible_auto_type(&self) -> bool { + debug_assert_eq!(self.kind(), CXType_Auto); + self.canonical_type() == *self + } + + #[inline] + fn clang_size_of(&self, ctx: &BindgenContext) -> c_longlong { + match self.kind() { + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 + CXType_RValueReference | CXType_LValueReference => { + ctx.target_pointer_size() as c_longlong + } + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 + CXType_Auto if self.is_non_deductible_auto_type() => -6, + _ => unsafe { clang_Type_getSizeOf(self.x) }, + } + } + + #[inline] + fn clang_align_of(&self, ctx: &BindgenContext) -> c_longlong { + match self.kind() { + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 + CXType_RValueReference | CXType_LValueReference => { + ctx.target_pointer_size() as c_longlong + } + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 + CXType_Auto if self.is_non_deductible_auto_type() => -6, + _ => unsafe { clang_Type_getAlignOf(self.x) }, + } + } + + /// What is the size of this type? Paper over invalid types by returning `0` + /// for them. + pub(crate) fn size(&self, ctx: &BindgenContext) -> usize { + let val = self.clang_size_of(ctx); + if val < 0 { + 0 + } else { + val as usize + } + } + + /// What is the size of this type? + pub(crate) fn fallible_size( + &self, + ctx: &BindgenContext, + ) -> Result { + let val = self.clang_size_of(ctx); + if val < 0 { + Err(LayoutError::from(val as i32)) + } else { + Ok(val as usize) + } + } + + /// What is the alignment of this type? Paper over invalid types by + /// returning `0`. + pub(crate) fn align(&self, ctx: &BindgenContext) -> usize { + let val = self.clang_align_of(ctx); + if val < 0 { + 0 + } else { + val as usize + } + } + + /// What is the alignment of this type? 
+ pub(crate) fn fallible_align( + &self, + ctx: &BindgenContext, + ) -> Result { + let val = self.clang_align_of(ctx); + if val < 0 { + Err(LayoutError::from(val as i32)) + } else { + Ok(val as usize) + } + } + + /// Get the layout for this type, or an error describing why it does not + /// have a valid layout. + pub(crate) fn fallible_layout( + &self, + ctx: &BindgenContext, + ) -> Result { + use crate::ir::layout::Layout; + let size = self.fallible_size(ctx)?; + let align = self.fallible_align(ctx)?; + Ok(Layout::new(size, align)) + } + + /// Get the number of template arguments this type has, or `None` if it is + /// not some kind of template. + pub(crate) fn num_template_args(&self) -> Option { + let n = unsafe { clang_Type_getNumTemplateArguments(self.x) }; + if n >= 0 { + Some(n as u32) + } else { + debug_assert_eq!(n, -1); + None + } + } + + /// If this type is a class template specialization, return its + /// template arguments. Otherwise, return None. + pub(crate) fn template_args(&self) -> Option { + self.num_template_args().map(|n| TypeTemplateArgIterator { + x: self.x, + length: n, + index: 0, + }) + } + + /// Given that this type is a function prototype, return the types of its parameters. + /// + /// Returns None if the type is not a function prototype. + pub(crate) fn args(&self) -> Option> { + self.num_args().ok().map(|num| { + (0..num) + .map(|i| Type { + x: unsafe { clang_getArgType(self.x, i as c_uint) }, + }) + .collect() + }) + } + + /// Given that this type is a function prototype, return the number of arguments it takes. + /// + /// Returns Err if the type is not a function prototype. + pub(crate) fn num_args(&self) -> Result { + unsafe { + let w = clang_getNumArgTypes(self.x); + if w == -1 { + Err(()) + } else { + Ok(w as u32) + } + } + } + + /// Given that this type is a pointer type, return the type that it points + /// to. 
+ pub(crate) fn pointee_type(&self) -> Option { + match self.kind() { + CXType_Pointer | + CXType_RValueReference | + CXType_LValueReference | + CXType_MemberPointer | + CXType_BlockPointer | + CXType_ObjCObjectPointer => { + let ret = Type { + x: unsafe { clang_getPointeeType(self.x) }, + }; + debug_assert!(ret.is_valid()); + Some(ret) + } + _ => None, + } + } + + /// Given that this type is an array, vector, or complex type, return the + /// type of its elements. + pub(crate) fn elem_type(&self) -> Option { + let current_type = Type { + x: unsafe { clang_getElementType(self.x) }, + }; + if current_type.is_valid() { + Some(current_type) + } else { + None + } + } + + /// Given that this type is an array or vector type, return its number of + /// elements. + pub(crate) fn num_elements(&self) -> Option { + let num_elements_returned = unsafe { clang_getNumElements(self.x) }; + if num_elements_returned != -1 { + Some(num_elements_returned as usize) + } else { + None + } + } + + /// Get the canonical version of this type. This sees through `typedef`s and + /// aliases to get the underlying, canonical type. + pub(crate) fn canonical_type(&self) -> Type { + unsafe { + Type { + x: clang_getCanonicalType(self.x), + } + } + } + + /// Is this type a variadic function type? + pub(crate) fn is_variadic(&self) -> bool { + unsafe { clang_isFunctionTypeVariadic(self.x) != 0 } + } + + /// Given that this type is a function type, get the type of its return + /// value. + pub(crate) fn ret_type(&self) -> Option { + let rt = Type { + x: unsafe { clang_getResultType(self.x) }, + }; + if rt.is_valid() { + Some(rt) + } else { + None + } + } + + /// Given that this type is a function type, get its calling convention. If + /// this is not a function type, `CXCallingConv_Invalid` is returned. 
+ pub(crate) fn call_conv(&self) -> CXCallingConv { + unsafe { clang_getFunctionTypeCallingConv(self.x) } + } + + /// For elaborated types (types which use `class`, `struct`, or `union` to + /// disambiguate types from local bindings), get the underlying type. + pub(crate) fn named(&self) -> Type { + unsafe { + Type { + x: clang_Type_getNamedType(self.x), + } + } + } + + /// Is this a valid type? + pub(crate) fn is_valid(&self) -> bool { + self.kind() != CXType_Invalid + } + + /// Is this a valid and exposed type? + pub(crate) fn is_valid_and_exposed(&self) -> bool { + self.is_valid() && self.kind() != CXType_Unexposed + } + + /// Is this type a fully instantiated template? + pub(crate) fn is_fully_instantiated_template(&self) -> bool { + // Yep, the spelling of this containing type-parameter is extremely + // nasty... But can happen in . Unfortunately I couldn't + // reduce it enough :( + self.template_args().map_or(false, |args| args.len() > 0) && + !matches!( + self.declaration().kind(), + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_TypeAliasTemplateDecl | + CXCursor_TemplateTemplateParameter + ) + } + + /// Is this type an associated template type? Eg `T::Associated` in + /// this example: + /// + /// ```c++ + /// template + /// class Foo { + /// typename T::Associated member; + /// }; + /// ``` + pub(crate) fn is_associated_type(&self) -> bool { + // This is terrible :( + fn hacky_parse_associated_type>(spelling: S) -> bool { + lazy_static! { + static ref ASSOC_TYPE_RE: regex::Regex = regex::Regex::new( + r"typename type\-parameter\-\d+\-\d+::.+" + ) + .unwrap(); + } + ASSOC_TYPE_RE.is_match(spelling.as_ref()) + } + + self.kind() == CXType_Unexposed && + (hacky_parse_associated_type(self.spelling()) || + hacky_parse_associated_type( + self.canonical_type().spelling(), + )) + } +} + +/// The `CanonicalTypeDeclaration` type exists as proof-by-construction that its +/// cursor is the canonical declaration for its type. 
If you have a +/// `CanonicalTypeDeclaration` instance, you know for sure that the type and +/// cursor match up in a canonical declaration relationship, and it simply +/// cannot be otherwise. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct CanonicalTypeDeclaration(Type, Cursor); + +impl CanonicalTypeDeclaration { + /// Get the type. + pub(crate) fn ty(&self) -> &Type { + &self.0 + } + + /// Get the type's canonical declaration cursor. + pub(crate) fn cursor(&self) -> &Cursor { + &self.1 + } +} + +/// An iterator for a type's template arguments. +pub(crate) struct TypeTemplateArgIterator { + x: CXType, + length: u32, + index: u32, +} + +impl Iterator for TypeTemplateArgIterator { + type Item = Type; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index as c_uint; + self.index += 1; + Some(Type { + x: unsafe { clang_Type_getTemplateArgumentAsType(self.x, idx) }, + }) + } else { + None + } + } +} + +impl ExactSizeIterator for TypeTemplateArgIterator { + fn len(&self) -> usize { + assert!(self.index <= self.length); + (self.length - self.index) as usize + } +} + +/// A `SourceLocation` is a file, line, column, and byte offset location for +/// some source text. +pub(crate) struct SourceLocation { + x: CXSourceLocation, +} + +impl SourceLocation { + /// Get the (file, line, column, byte offset) tuple for this source + /// location. 
+ pub(crate) fn location(&self) -> (File, usize, usize, usize) { + unsafe { + let mut file = mem::zeroed(); + let mut line = 0; + let mut col = 0; + let mut off = 0; + clang_getSpellingLocation( + self.x, &mut file, &mut line, &mut col, &mut off, + ); + (File { x: file }, line as usize, col as usize, off as usize) + } + } +} + +impl fmt::Display for SourceLocation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (file, line, col, _) = self.location(); + if let Some(name) = file.name() { + write!(f, "{}:{}:{}", name, line, col) + } else { + "builtin definitions".fmt(f) + } + } +} + +impl fmt::Debug for SourceLocation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +/// A comment in the source text. +/// +/// Comments are sort of parsed by Clang, and have a tree structure. +pub(crate) struct Comment { + x: CXComment, +} + +impl Comment { + /// What kind of comment is this? + pub(crate) fn kind(&self) -> CXCommentKind { + unsafe { clang_Comment_getKind(self.x) } + } + + /// Get this comment's children comment + pub(crate) fn get_children(&self) -> CommentChildrenIterator { + CommentChildrenIterator { + parent: self.x, + length: unsafe { clang_Comment_getNumChildren(self.x) }, + index: 0, + } + } + + /// Given that this comment is the start or end of an HTML tag, get its tag + /// name. + pub(crate) fn get_tag_name(&self) -> String { + unsafe { cxstring_into_string(clang_HTMLTagComment_getTagName(self.x)) } + } + + /// Given that this comment is an HTML start tag, get its attributes. 
+ pub(crate) fn get_tag_attrs(&self) -> CommentAttributesIterator { + CommentAttributesIterator { + x: self.x, + length: unsafe { clang_HTMLStartTag_getNumAttrs(self.x) }, + index: 0, + } + } +} + +/// An iterator for a comment's children +pub(crate) struct CommentChildrenIterator { + parent: CXComment, + length: c_uint, + index: c_uint, +} + +impl Iterator for CommentChildrenIterator { + type Item = Comment; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index; + self.index += 1; + Some(Comment { + x: unsafe { clang_Comment_getChild(self.parent, idx) }, + }) + } else { + None + } + } +} + +/// An HTML start tag comment attribute +pub(crate) struct CommentAttribute { + /// HTML start tag attribute name + pub(crate) name: String, + /// HTML start tag attribute value + pub(crate) value: String, +} + +/// An iterator for a comment's attributes +pub(crate) struct CommentAttributesIterator { + x: CXComment, + length: c_uint, + index: c_uint, +} + +impl Iterator for CommentAttributesIterator { + type Item = CommentAttribute; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index; + self.index += 1; + Some(CommentAttribute { + name: unsafe { + cxstring_into_string(clang_HTMLStartTag_getAttrName( + self.x, idx, + )) + }, + value: unsafe { + cxstring_into_string(clang_HTMLStartTag_getAttrValue( + self.x, idx, + )) + }, + }) + } else { + None + } + } +} + +/// A source file. +pub(crate) struct File { + x: CXFile, +} + +impl File { + /// Get the name of this source file. 
+ pub(crate) fn name(&self) -> Option { + if self.x.is_null() { + return None; + } + Some(unsafe { cxstring_into_string(clang_getFileName(self.x)) }) + } +} + +fn cxstring_to_string_leaky(s: CXString) -> String { + if s.data.is_null() { + return "".to_owned(); + } + let c_str = unsafe { CStr::from_ptr(clang_getCString(s) as *const _) }; + c_str.to_string_lossy().into_owned() +} + +fn cxstring_into_string(s: CXString) -> String { + let ret = cxstring_to_string_leaky(s); + unsafe { clang_disposeString(s) }; + ret +} + +/// An `Index` is an environment for a set of translation units that will +/// typically end up linked together in one final binary. +pub(crate) struct Index { + x: CXIndex, +} + +impl Index { + /// Construct a new `Index`. + /// + /// The `pch` parameter controls whether declarations in pre-compiled + /// headers are included when enumerating a translation unit's "locals". + /// + /// The `diag` parameter controls whether debugging diagnostics are enabled. + pub(crate) fn new(pch: bool, diag: bool) -> Index { + unsafe { + Index { + x: clang_createIndex(pch as c_int, diag as c_int), + } + } + } +} + +impl fmt::Debug for Index { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Index {{ }}") + } +} + +impl Drop for Index { + fn drop(&mut self) { + unsafe { + clang_disposeIndex(self.x); + } + } +} + +/// A translation unit (or "compilation unit"). +pub(crate) struct TranslationUnit { + x: CXTranslationUnit, +} + +impl fmt::Debug for TranslationUnit { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "TranslationUnit {{ }}") + } +} + +impl TranslationUnit { + /// Parse a source file into a translation unit. 
+ pub(crate) fn parse( + ix: &Index, + file: &str, + cmd_args: &[String], + unsaved: &[UnsavedFile], + opts: CXTranslationUnit_Flags, + ) -> Option { + let fname = CString::new(file).unwrap(); + let _c_args: Vec = cmd_args + .iter() + .map(|s| CString::new(s.clone()).unwrap()) + .collect(); + let c_args: Vec<*const c_char> = + _c_args.iter().map(|s| s.as_ptr()).collect(); + let mut c_unsaved: Vec = + unsaved.iter().map(|f| f.x).collect(); + let tu = unsafe { + clang_parseTranslationUnit( + ix.x, + fname.as_ptr(), + c_args.as_ptr(), + c_args.len() as c_int, + c_unsaved.as_mut_ptr(), + c_unsaved.len() as c_uint, + opts, + ) + }; + if tu.is_null() { + None + } else { + Some(TranslationUnit { x: tu }) + } + } + + /// Get the Clang diagnostic information associated with this translation + /// unit. + pub(crate) fn diags(&self) -> Vec { + unsafe { + let num = clang_getNumDiagnostics(self.x) as usize; + let mut diags = vec![]; + for i in 0..num { + diags.push(Diagnostic { + x: clang_getDiagnostic(self.x, i as c_uint), + }); + } + diags + } + } + + /// Get a cursor pointing to the root of this translation unit's AST. + pub(crate) fn cursor(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getTranslationUnitCursor(self.x), + } + } + } + + /// Is this the null translation unit? + pub(crate) fn is_null(&self) -> bool { + self.x.is_null() + } +} + +impl Drop for TranslationUnit { + fn drop(&mut self) { + unsafe { + clang_disposeTranslationUnit(self.x); + } + } +} + +/// A diagnostic message generated while parsing a translation unit. +pub(crate) struct Diagnostic { + x: CXDiagnostic, +} + +impl Diagnostic { + /// Format this diagnostic message as a string, using the given option bit + /// flags. + pub(crate) fn format(&self) -> String { + unsafe { + let opts = clang_defaultDiagnosticDisplayOptions(); + cxstring_into_string(clang_formatDiagnostic(self.x, opts)) + } + } + + /// What is the severity of this diagnostic message? 
+ pub(crate) fn severity(&self) -> CXDiagnosticSeverity { + unsafe { clang_getDiagnosticSeverity(self.x) } + } +} + +impl Drop for Diagnostic { + /// Destroy this diagnostic message. + fn drop(&mut self) { + unsafe { + clang_disposeDiagnostic(self.x); + } + } +} + +/// A file which has not been saved to disk. +pub(crate) struct UnsavedFile { + x: CXUnsavedFile, + /// The name of the unsaved file. Kept here to avoid leaving dangling pointers in + /// `CXUnsavedFile`. + pub(crate) name: CString, + contents: CString, +} + +impl UnsavedFile { + /// Construct a new unsaved file with the given `name` and `contents`. + pub(crate) fn new(name: String, contents: String) -> UnsavedFile { + let name = CString::new(name).unwrap(); + let contents = CString::new(contents).unwrap(); + let x = CXUnsavedFile { + Filename: name.as_ptr(), + Contents: contents.as_ptr(), + Length: contents.as_bytes().len() as c_ulong, + }; + UnsavedFile { x, name, contents } + } +} + +impl fmt::Debug for UnsavedFile { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "UnsavedFile(name: {:?}, contents: {:?})", + self.name, self.contents + ) + } +} + +/// Convert a cursor kind into a static string. +pub(crate) fn kind_to_str(x: CXCursorKind) -> String { + unsafe { cxstring_into_string(clang_getCursorKindSpelling(x)) } +} + +/// Convert a type kind to a static string. +pub(crate) fn type_to_str(x: CXTypeKind) -> String { + unsafe { cxstring_into_string(clang_getTypeKindSpelling(x)) } +} + +/// Dump the Clang AST to stdout for debugging purposes. 
+pub(crate) fn ast_dump(c: &Cursor, depth: isize) -> CXChildVisitResult { + fn print_indent>(depth: isize, s: S) { + for _ in 0..depth { + print!(" "); + } + println!("{}", s.as_ref()); + } + + fn print_cursor>(depth: isize, prefix: S, c: &Cursor) { + let prefix = prefix.as_ref(); + print_indent( + depth, + format!(" {}kind = {}", prefix, kind_to_str(c.kind())), + ); + print_indent( + depth, + format!(" {}spelling = \"{}\"", prefix, c.spelling()), + ); + print_indent(depth, format!(" {}location = {}", prefix, c.location())); + print_indent( + depth, + format!(" {}is-definition? {}", prefix, c.is_definition()), + ); + print_indent( + depth, + format!(" {}is-declaration? {}", prefix, c.is_declaration()), + ); + print_indent( + depth, + format!( + " {}is-inlined-function? {}", + prefix, + c.is_inlined_function() + ), + ); + + let templ_kind = c.template_kind(); + if templ_kind != CXCursor_NoDeclFound { + print_indent( + depth, + format!( + " {}template-kind = {}", + prefix, + kind_to_str(templ_kind) + ), + ); + } + if let Some(usr) = c.usr() { + print_indent(depth, format!(" {}usr = \"{}\"", prefix, usr)); + } + if let Ok(num) = c.num_args() { + print_indent(depth, format!(" {}number-of-args = {}", prefix, num)); + } + if let Some(num) = c.num_template_args() { + print_indent( + depth, + format!(" {}number-of-template-args = {}", prefix, num), + ); + } + + if c.is_bit_field() { + let width = match c.bit_width() { + Some(w) => w.to_string(), + None => "".to_string(), + }; + print_indent(depth, format!(" {}bit-width = {}", prefix, width)); + } + + if let Some(ty) = c.enum_type() { + print_indent( + depth, + format!(" {}enum-type = {}", prefix, type_to_str(ty.kind())), + ); + } + if let Some(val) = c.enum_val_signed() { + print_indent(depth, format!(" {}enum-val = {}", prefix, val)); + } + if let Some(ty) = c.typedef_type() { + print_indent( + depth, + format!(" {}typedef-type = {}", prefix, type_to_str(ty.kind())), + ); + } + if let Some(ty) = c.ret_type() { + 
print_indent( + depth, + format!(" {}ret-type = {}", prefix, type_to_str(ty.kind())), + ); + } + + if let Some(refd) = c.referenced() { + if refd != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "referenced.", + &refd, + ); + } + } + + let canonical = c.canonical(); + if canonical != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "canonical.", + &canonical, + ); + } + + if let Some(specialized) = c.specialized() { + if specialized != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "specialized.", + &specialized, + ); + } + } + + if let Some(parent) = c.fallible_semantic_parent() { + println!(); + print_cursor( + depth, + String::from(prefix) + "semantic-parent.", + &parent, + ); + } + } + + fn print_type>(depth: isize, prefix: S, ty: &Type) { + let prefix = prefix.as_ref(); + + let kind = ty.kind(); + print_indent(depth, format!(" {}kind = {}", prefix, type_to_str(kind))); + if kind == CXType_Invalid { + return; + } + + print_indent(depth, format!(" {}cconv = {}", prefix, ty.call_conv())); + + print_indent( + depth, + format!(" {}spelling = \"{}\"", prefix, ty.spelling()), + ); + let num_template_args = + unsafe { clang_Type_getNumTemplateArguments(ty.x) }; + if num_template_args >= 0 { + print_indent( + depth, + format!( + " {}number-of-template-args = {}", + prefix, num_template_args + ), + ); + } + if let Some(num) = ty.num_elements() { + print_indent( + depth, + format!(" {}number-of-elements = {}", prefix, num), + ); + } + print_indent( + depth, + format!(" {}is-variadic? 
{}", prefix, ty.is_variadic()), + ); + + let canonical = ty.canonical_type(); + if canonical != *ty { + println!(); + print_type(depth, String::from(prefix) + "canonical.", &canonical); + } + + if let Some(pointee) = ty.pointee_type() { + if pointee != *ty { + println!(); + print_type(depth, String::from(prefix) + "pointee.", &pointee); + } + } + + if let Some(elem) = ty.elem_type() { + if elem != *ty { + println!(); + print_type(depth, String::from(prefix) + "elements.", &elem); + } + } + + if let Some(ret) = ty.ret_type() { + if ret != *ty { + println!(); + print_type(depth, String::from(prefix) + "return.", &ret); + } + } + + let named = ty.named(); + if named != *ty && named.is_valid() { + println!(); + print_type(depth, String::from(prefix) + "named.", &named); + } + } + + print_indent(depth, "("); + print_cursor(depth, "", c); + + println!(); + let ty = c.cur_type(); + print_type(depth, "type.", &ty); + + let declaration = ty.declaration(); + if declaration != *c && declaration.kind() != CXCursor_NoDeclFound { + println!(); + print_cursor(depth, "type.declaration.", &declaration); + } + + // Recurse. + let mut found_children = false; + c.visit(|s| { + if !found_children { + println!(); + found_children = true; + } + ast_dump(&s, depth + 1) + }); + + print_indent(depth, ")"); + + CXChildVisit_Continue +} + +/// Try to extract the clang version to a string +pub(crate) fn extract_clang_version() -> String { + unsafe { cxstring_into_string(clang_getClangVersion()) } +} + +/// A wrapper for the result of evaluating an expression. +#[derive(Debug)] +pub(crate) struct EvalResult { + x: CXEvalResult, + ty: Type, +} + +impl EvalResult { + /// Evaluate `cursor` and return the result. 
+ pub(crate) fn new(cursor: Cursor) -> Option { + // Work around https://bugs.llvm.org/show_bug.cgi?id=42532, see: + // * https://github.com/rust-lang/rust-bindgen/issues/283 + // * https://github.com/rust-lang/rust-bindgen/issues/1590 + { + let mut found_cant_eval = false; + cursor.visit(|c| { + if c.kind() == CXCursor_TypeRef && + c.cur_type().canonical_type().kind() == CXType_Unexposed + { + found_cant_eval = true; + return CXChildVisit_Break; + } + + CXChildVisit_Recurse + }); + + if found_cant_eval { + return None; + } + } + Some(EvalResult { + x: unsafe { clang_Cursor_Evaluate(cursor.x) }, + ty: cursor.cur_type().canonical_type(), + }) + } + + fn kind(&self) -> CXEvalResultKind { + unsafe { clang_EvalResult_getKind(self.x) } + } + + /// Try to get back the result as a double. + pub(crate) fn as_double(&self) -> Option { + match self.kind() { + CXEval_Float => { + Some(unsafe { clang_EvalResult_getAsDouble(self.x) }) + } + _ => None, + } + } + + /// Try to get back the result as an integer. + pub(crate) fn as_int(&self) -> Option { + if self.kind() != CXEval_Int { + return None; + } + + if unsafe { clang_EvalResult_isUnsignedInt(self.x) } != 0 { + let value = unsafe { clang_EvalResult_getAsUnsigned(self.x) }; + if value > i64::max_value() as c_ulonglong { + return None; + } + + return Some(value as i64); + } + + let value = unsafe { clang_EvalResult_getAsLongLong(self.x) }; + if value > i64::max_value() as c_longlong { + return None; + } + if value < i64::min_value() as c_longlong { + return None; + } + #[allow(clippy::unnecessary_cast)] + Some(value as i64) + } + + /// Evaluates the expression as a literal string, that may or may not be + /// valid utf-8. 
+ pub(crate) fn as_literal_string(&self) -> Option> { + if self.kind() != CXEval_StrLiteral { + return None; + } + + let char_ty = self.ty.pointee_type().or_else(|| self.ty.elem_type())?; + match char_ty.kind() { + CXType_Char_S | CXType_SChar | CXType_Char_U | CXType_UChar => { + let ret = unsafe { + CStr::from_ptr(clang_EvalResult_getAsStr(self.x)) + }; + Some(ret.to_bytes().to_vec()) + } + // FIXME: Support generating these. + CXType_Char16 => None, + CXType_Char32 => None, + CXType_WChar => None, + _ => None, + } + } +} + +impl Drop for EvalResult { + fn drop(&mut self) { + unsafe { clang_EvalResult_dispose(self.x) }; + } +} + +/// Target information obtained from libclang. +#[derive(Debug)] +pub(crate) struct TargetInfo { + /// The target triple. + pub(crate) triple: String, + /// The width of the pointer _in bits_. + pub(crate) pointer_width: usize, +} + +impl TargetInfo { + /// Tries to obtain target information from libclang. + pub(crate) fn new(tu: &TranslationUnit) -> Self { + let triple; + let pointer_width; + unsafe { + let ti = clang_getTranslationUnitTargetInfo(tu.x); + triple = cxstring_into_string(clang_TargetInfo_getTriple(ti)); + pointer_width = clang_TargetInfo_getPointerWidth(ti); + clang_TargetInfo_dispose(ti); + } + assert!(pointer_width > 0); + assert_eq!(pointer_width % 8, 0); + TargetInfo { + triple, + pointer_width: pointer_width as usize, + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,102 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} + +impl 
__BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} + +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + + byte & mask == mask + } + + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + let mut val = 0; + + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + + val + } + + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} diff 
-Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit_tests.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit_tests.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit_tests.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/bitfield_unit_tests.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,260 @@ +//! Tests for `__BindgenBitfieldUnit`. +//! +//! Note that bit-fields are allocated right to left (least to most significant +//! bits). +//! +//! From the x86 PS ABI: +//! +//! ```c +//! struct { +//! int j : 5; +//! int k : 6; +//! int m : 7; +//! }; +//! ``` +//! +//! ```ignore +//! +------------------------------------------------------------+ +//! | | | | | +//! | padding | m | k | j | +//! |31 18|17 11|10 5|4 0| +//! +------------------------------------------------------------+ +//! ``` + +use super::bitfield_unit::__BindgenBitfieldUnit; + +#[test] +fn bitfield_unit_get_bit() { + let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]); + + let mut bits = vec![]; + for i in 0..16 { + bits.push(unit.get_bit(i)); + } + + println!(); + println!("bits = {:?}", bits); + assert_eq!( + bits, + &[ + // 0b10011101 + true, false, true, true, true, false, false, true, + // 0b00011101 + true, false, true, true, true, false, false, false + ] + ); +} + +#[test] +fn bitfield_unit_set_bit() { + let mut unit = + __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]); + + for i in 0..16 { + if i % 3 == 0 { + unit.set_bit(i, true); + } + } + + for i in 0..16 { + assert_eq!(unit.get_bit(i), i % 3 == 0); + } + + let mut unit = + __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]); + + for i in 0..16 { + if i % 3 == 0 { + unit.set_bit(i, false); + } + } + + for i in 0..16 { + assert_eq!(unit.get_bit(i), i % 3 != 0); + } +} + +macro_rules! 
bitfield_unit_get { + ( + $( + With $storage:expr , then get($start:expr, $len:expr) is $expected:expr; + )* + ) => { + #[test] + fn bitfield_unit_get() { + $({ + let expected = $expected; + let unit = __BindgenBitfieldUnit::<_>::new($storage); + let actual = unit.get($start, $len); + + println!(); + println!("expected = {:064b}", expected); + println!("actual = {:064b}", actual); + + assert_eq!(expected, actual); + })* + } + } +} + +bitfield_unit_get! { + // Let's just exhaustively test getting the bits from a single byte, since + // there are few enough combinations... + + With [0b11100010], then get(0, 1) is 0; + With [0b11100010], then get(1, 1) is 1; + With [0b11100010], then get(2, 1) is 0; + With [0b11100010], then get(3, 1) is 0; + With [0b11100010], then get(4, 1) is 0; + With [0b11100010], then get(5, 1) is 1; + With [0b11100010], then get(6, 1) is 1; + With [0b11100010], then get(7, 1) is 1; + + With [0b11100010], then get(0, 2) is 0b10; + With [0b11100010], then get(1, 2) is 0b01; + With [0b11100010], then get(2, 2) is 0b00; + With [0b11100010], then get(3, 2) is 0b00; + With [0b11100010], then get(4, 2) is 0b10; + With [0b11100010], then get(5, 2) is 0b11; + With [0b11100010], then get(6, 2) is 0b11; + + With [0b11100010], then get(0, 3) is 0b010; + With [0b11100010], then get(1, 3) is 0b001; + With [0b11100010], then get(2, 3) is 0b000; + With [0b11100010], then get(3, 3) is 0b100; + With [0b11100010], then get(4, 3) is 0b110; + With [0b11100010], then get(5, 3) is 0b111; + + With [0b11100010], then get(0, 4) is 0b0010; + With [0b11100010], then get(1, 4) is 0b0001; + With [0b11100010], then get(2, 4) is 0b1000; + With [0b11100010], then get(3, 4) is 0b1100; + With [0b11100010], then get(4, 4) is 0b1110; + + With [0b11100010], then get(0, 5) is 0b00010; + With [0b11100010], then get(1, 5) is 0b10001; + With [0b11100010], then get(2, 5) is 0b11000; + With [0b11100010], then get(3, 5) is 0b11100; + + With [0b11100010], then get(0, 6) is 0b100010; + With 
[0b11100010], then get(1, 6) is 0b110001; + With [0b11100010], then get(2, 6) is 0b111000; + + With [0b11100010], then get(0, 7) is 0b1100010; + With [0b11100010], then get(1, 7) is 0b1110001; + + With [0b11100010], then get(0, 8) is 0b11100010; + + // OK. Now let's test getting bits from across byte boundaries. + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(0, 16) is 0b1111111101010101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(1, 16) is 0b0111111110101010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(2, 16) is 0b0011111111010101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(3, 16) is 0b0001111111101010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(4, 16) is 0b0000111111110101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(5, 16) is 0b0000011111111010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(6, 16) is 0b0000001111111101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(7, 16) is 0b0000000111111110; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(8, 16) is 0b0000000011111111; +} + +macro_rules! bitfield_unit_set { + ( + $( + set($start:expr, $len:expr, $val:expr) is $expected:expr; + )* + ) => { + #[test] + fn bitfield_unit_set() { + $( + let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]); + unit.set($start, $len, $val); + let actual = unit.get(0, 32); + + println!(); + println!("set({}, {}, {:032b}", $start, $len, $val); + println!("expected = {:064b}", $expected); + println!("actual = {:064b}", actual); + + assert_eq!($expected, actual); + )* + } + } +} + +bitfield_unit_set! { + // Once again, let's exhaustively test single byte combinations. 
+ + set(0, 1, 0b11111111) is 0b00000001; + set(1, 1, 0b11111111) is 0b00000010; + set(2, 1, 0b11111111) is 0b00000100; + set(3, 1, 0b11111111) is 0b00001000; + set(4, 1, 0b11111111) is 0b00010000; + set(5, 1, 0b11111111) is 0b00100000; + set(6, 1, 0b11111111) is 0b01000000; + set(7, 1, 0b11111111) is 0b10000000; + + set(0, 2, 0b11111111) is 0b00000011; + set(1, 2, 0b11111111) is 0b00000110; + set(2, 2, 0b11111111) is 0b00001100; + set(3, 2, 0b11111111) is 0b00011000; + set(4, 2, 0b11111111) is 0b00110000; + set(5, 2, 0b11111111) is 0b01100000; + set(6, 2, 0b11111111) is 0b11000000; + + set(0, 3, 0b11111111) is 0b00000111; + set(1, 3, 0b11111111) is 0b00001110; + set(2, 3, 0b11111111) is 0b00011100; + set(3, 3, 0b11111111) is 0b00111000; + set(4, 3, 0b11111111) is 0b01110000; + set(5, 3, 0b11111111) is 0b11100000; + + set(0, 4, 0b11111111) is 0b00001111; + set(1, 4, 0b11111111) is 0b00011110; + set(2, 4, 0b11111111) is 0b00111100; + set(3, 4, 0b11111111) is 0b01111000; + set(4, 4, 0b11111111) is 0b11110000; + + set(0, 5, 0b11111111) is 0b00011111; + set(1, 5, 0b11111111) is 0b00111110; + set(2, 5, 0b11111111) is 0b01111100; + set(3, 5, 0b11111111) is 0b11111000; + + set(0, 6, 0b11111111) is 0b00111111; + set(1, 6, 0b11111111) is 0b01111110; + set(2, 6, 0b11111111) is 0b11111100; + + set(0, 7, 0b11111111) is 0b01111111; + set(1, 7, 0b11111111) is 0b11111110; + + set(0, 8, 0b11111111) is 0b11111111; + + // And, now let's cross byte boundaries. 
+ + set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111; + set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110; + set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100; + set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000; + set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000; + set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000; + set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000; + set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000; + set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000; +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/dyngen.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/dyngen.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/dyngen.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/dyngen.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,201 @@ +use crate::codegen; +use crate::ir::context::BindgenContext; +use crate::ir::function::ClangAbi; +use proc_macro2::Ident; + +/// Used to build the output tokens for dynamic bindings. +#[derive(Default)] +pub(crate) struct DynamicItems { + /// Tracks the tokens that will appears inside the library struct -- e.g.: + /// ```ignore + /// struct Lib { + /// __library: ::libloading::Library, + /// pub x: Result, // <- tracks these + /// ... + /// } + /// ``` + struct_members: Vec, + + /// Tracks the tokens that will appear inside the library struct's implementation, e.g.: + /// + /// ```ignore + /// impl Lib { + /// ... + /// pub unsafe fn foo(&self, ...) { // <- tracks these + /// ... + /// } + /// } + /// ``` + struct_implementation: Vec, + + /// Tracks the initialization of the fields inside the `::new` constructor of the library + /// struct, e.g.: + /// ```ignore + /// impl Lib { + /// + /// pub unsafe fn new

(path: P) -> Result + /// where + /// P: AsRef<::std::ffi::OsStr>, + /// { + /// ... + /// let foo = __library.get(...) ...; // <- tracks these + /// ... + /// } + /// + /// ... + /// } + /// ``` + constructor_inits: Vec, + + /// Tracks the information that is passed to the library struct at the end of the `::new` + /// constructor, e.g.: + /// ```ignore + /// impl LibFoo { + /// pub unsafe fn new

(path: P) -> Result + /// where + /// P: AsRef<::std::ffi::OsStr>, + /// { + /// ... + /// Ok(LibFoo { + /// __library: __library, + /// foo, + /// bar, // <- tracks these + /// ... + /// }) + /// } + /// } + /// ``` + init_fields: Vec, +} + +impl DynamicItems { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn get_tokens( + &self, + lib_ident: Ident, + ctx: &BindgenContext, + ) -> proc_macro2::TokenStream { + let struct_members = &self.struct_members; + let constructor_inits = &self.constructor_inits; + let init_fields = &self.init_fields; + let struct_implementation = &self.struct_implementation; + + let from_library = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { Self::from_library(library) }) + } else { + quote!(Self::from_library(library)) + }; + + quote! { + extern crate libloading; + + pub struct #lib_ident { + __library: ::libloading::Library, + #(#struct_members)* + } + + impl #lib_ident { + pub unsafe fn new

( + path: P + ) -> Result + where P: AsRef<::std::ffi::OsStr> { + let library = ::libloading::Library::new(path)?; + #from_library + } + + pub unsafe fn from_library( + library: L + ) -> Result + where L: Into<::libloading::Library> { + let __library = library.into(); + #( #constructor_inits )* + Ok(#lib_ident { + __library, + #( #init_fields ),* + }) + } + + #( #struct_implementation )* + } + } + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn push( + &mut self, + ident: Ident, + abi: ClangAbi, + is_variadic: bool, + is_required: bool, + args: Vec, + args_identifiers: Vec, + ret: proc_macro2::TokenStream, + ret_ty: proc_macro2::TokenStream, + attributes: Vec, + ctx: &BindgenContext, + ) { + if !is_variadic { + assert_eq!(args.len(), args_identifiers.len()); + } + + let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret }; + let member = if is_required { + signature + } else { + quote! { Result<#signature, ::libloading::Error> } + }; + + self.struct_members.push(quote! { + pub #ident: #member, + }); + + // N.B: If the signature was required, it won't be wrapped in a Result<...> + // and we can simply call it directly. + let fn_ = if is_required { + quote! { self.#ident } + } else { + quote! { self.#ident.as_ref().expect("Expected function, got error.") } + }; + let call_body = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { (#fn_)(#( #args_identifiers ),*) }) + } else { + quote!((#fn_)(#( #args_identifiers ),*) ) + }; + + // We can't implement variadic functions from C easily, so we allow to + // access the function pointer so that the user can call it just fine. + if !is_variadic { + self.struct_implementation.push(quote! { + #(#attributes)* + pub unsafe fn #ident ( &self, #( #args ),* ) #ret_ty { + #call_body + } + }); + } + + // N.B: Unwrap the signature upon construction if it is required to be resolved. 
+ let ident_str = codegen::helpers::ast_ty::cstr_expr(ident.to_string()); + let library_get = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { __library.get(#ident_str) }) + } else { + quote!(__library.get(#ident_str)) + }; + + self.constructor_inits.push(if is_required { + quote! { + let #ident = #library_get.map(|sym| *sym)?; + } + } else { + quote! { + let #ident = #library_get.map(|sym| *sym); + } + }); + + self.init_fields.push(quote! { + #ident + }); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/error.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/error.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/error.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/error.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,33 @@ +use std::error; +use std::fmt; + +/// Errors that can occur during code generation. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum Error { + /// Tried to generate an opaque blob for a type that did not have a layout. + NoLayoutForOpaqueBlob, + + /// Tried to instantiate an opaque template definition, or a template + /// definition that is too difficult for us to understand (like a partial + /// template specialization). + InstantiationOfOpaqueType, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + Error::NoLayoutForOpaqueBlob => { + "Tried to generate an opaque blob, but had no layout" + } + Error::InstantiationOfOpaqueType => { + "Instantiation of opaque template type or partial template \ + specialization" + } + }) + } +} + +impl error::Error for Error {} + +/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`. 
+pub(crate) type Result = ::std::result::Result; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/helpers.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/helpers.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/helpers.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/helpers.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,322 @@ +//! Helpers for code generation that don't need macro expansion. + +use crate::ir::context::BindgenContext; +use crate::ir::layout::Layout; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::TokenStreamExt; + +pub(crate) mod attributes { + use proc_macro2::{Ident, Span, TokenStream}; + use std::{borrow::Cow, str::FromStr}; + + pub(crate) fn repr(which: &str) -> TokenStream { + let which = Ident::new(which, Span::call_site()); + quote! { + #[repr( #which )] + } + } + + pub(crate) fn repr_list(which_ones: &[&str]) -> TokenStream { + let which_ones = which_ones + .iter() + .cloned() + .map(|one| TokenStream::from_str(one).expect("repr to be valid")); + quote! { + #[repr( #( #which_ones ),* )] + } + } + + pub(crate) fn derives(which_ones: &[&str]) -> TokenStream { + let which_ones = which_ones + .iter() + .cloned() + .map(|one| TokenStream::from_str(one).expect("derive to be valid")); + quote! { + #[derive( #( #which_ones ),* )] + } + } + + pub(crate) fn inline() -> TokenStream { + quote! { + #[inline] + } + } + + pub(crate) fn must_use() -> TokenStream { + quote! { + #[must_use] + } + } + + pub(crate) fn non_exhaustive() -> TokenStream { + quote! { + #[non_exhaustive] + } + } + + pub(crate) fn doc(comment: String) -> TokenStream { + if comment.is_empty() { + quote!() + } else { + quote!(#[doc = #comment]) + } + } + + pub(crate) fn link_name(name: &str) -> TokenStream { + // LLVM mangles the name by default but it's already mangled. + // Prefixing the name with \u{1} should tell LLVM to not mangle it. 
+ let name: Cow<'_, str> = if MANGLE { + name.into() + } else { + format!("\u{1}{}", name).into() + }; + + quote! { + #[link_name = #name] + } + } +} + +/// Generates a proper type for a field or type with a given `Layout`, that is, +/// a type with the correct size and alignment restrictions. +pub(crate) fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream { + let opaque = layout.opaque(); + + // FIXME(emilio, #412): We fall back to byte alignment, but there are + // some things that legitimately are more than 8-byte aligned. + // + // Eventually we should be able to `unwrap` here, but... + let ty_name = match opaque.known_rust_type_for_array(ctx) { + Some(ty) => ty, + None => { + warn!("Found unknown alignment on code generation!"); + "u8" + } + }; + + let ty_name = Ident::new(ty_name, Span::call_site()); + + let data_len = opaque.array_size(ctx).unwrap_or(layout.size); + + if data_len == 1 { + quote! { + #ty_name + } + } else { + quote! { + [ #ty_name ; #data_len ] + } + } +} + +/// Integer type of the same size as the given `Layout`. +pub(crate) fn integer_type( + ctx: &BindgenContext, + layout: Layout, +) -> Option { + let name = Layout::known_type_for_size(ctx, layout.size)?; + let name = Ident::new(name, Span::call_site()); + Some(quote! { #name }) +} + +/// Generates a bitfield allocation unit type for a type with the given `Layout`. +pub(crate) fn bitfield_unit( + ctx: &BindgenContext, + layout: Layout, +) -> TokenStream { + let mut tokens = quote! {}; + + if ctx.options().enable_cxx_namespaces { + tokens.append_all(quote! { root:: }); + } + + let size = layout.size; + tokens.append_all(quote! 
{ + __BindgenBitfieldUnit<[u8; #size]> + }); + + tokens +} + +pub(crate) mod ast_ty { + use crate::ir::context::BindgenContext; + use crate::ir::function::FunctionSig; + use crate::ir::layout::Layout; + use crate::ir::ty::FloatKind; + use proc_macro2::{self, TokenStream}; + use std::str::FromStr; + + pub(crate) fn c_void(ctx: &BindgenContext) -> TokenStream { + // ctypes_prefix takes precedence + match ctx.options().ctypes_prefix { + Some(ref prefix) => { + let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); + quote! { + #prefix::c_void + } + } + None => { + if ctx.options().use_core && + ctx.options().rust_features.core_ffi_c_void + { + quote! { ::core::ffi::c_void } + } else { + quote! { ::std::os::raw::c_void } + } + } + } + } + + pub(crate) fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream { + let ident = ctx.rust_ident_raw(name); + match ctx.options().ctypes_prefix { + Some(ref prefix) => { + let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); + quote! { + #prefix::#ident + } + } + None => { + if ctx.options().use_core && + ctx.options().rust_features().core_ffi_c + { + quote! { + ::core::ffi::#ident + } + } else { + quote! { + ::std::os::raw::#ident + } + } + } + } + } + + pub(crate) fn float_kind_rust_type( + ctx: &BindgenContext, + fk: FloatKind, + layout: Option, + ) -> TokenStream { + // TODO: we probably should take the type layout into account more + // often? + // + // Also, maybe this one shouldn't be the default? + match (fk, ctx.options().convert_floats) { + (FloatKind::Float, true) => quote! { f32 }, + (FloatKind::Double, true) => quote! { f64 }, + (FloatKind::Float, false) => raw_type(ctx, "c_float"), + (FloatKind::Double, false) => raw_type(ctx, "c_double"), + (FloatKind::LongDouble, _) => { + match layout { + Some(layout) => { + match layout.size { + 4 => quote! { f32 }, + 8 => quote! { f64 }, + // TODO(emilio): If rust ever gains f128 we should + // use it here and below. 
+ _ => super::integer_type(ctx, layout) + .unwrap_or(quote! { f64 }), + } + } + None => { + debug_assert!( + false, + "How didn't we know the layout for a primitive type?" + ); + quote! { f64 } + } + } + } + (FloatKind::Float128, _) => { + if ctx.options().rust_features.i128_and_u128 { + quote! { u128 } + } else { + quote! { [u64; 2] } + } + } + } + } + + pub(crate) fn int_expr(val: i64) -> TokenStream { + // Don't use quote! { #val } because that adds the type suffix. + let val = proc_macro2::Literal::i64_unsuffixed(val); + quote!(#val) + } + + pub(crate) fn uint_expr(val: u64) -> TokenStream { + // Don't use quote! { #val } because that adds the type suffix. + let val = proc_macro2::Literal::u64_unsuffixed(val); + quote!(#val) + } + + pub(crate) fn byte_array_expr(bytes: &[u8]) -> TokenStream { + let mut bytes: Vec<_> = bytes.to_vec(); + bytes.push(0); + quote! { [ #(#bytes),* ] } + } + + pub(crate) fn cstr_expr(mut string: String) -> TokenStream { + string.push('\0'); + let b = proc_macro2::Literal::byte_string(string.as_bytes()); + quote! { + #b + } + } + + pub(crate) fn float_expr( + ctx: &BindgenContext, + f: f64, + ) -> Result { + if f.is_finite() { + let val = proc_macro2::Literal::f64_unsuffixed(f); + + return Ok(quote!(#val)); + } + + let prefix = ctx.trait_prefix(); + + if f.is_nan() { + return Ok(quote! { + ::#prefix::f64::NAN + }); + } + + if f.is_infinite() { + return Ok(if f.is_sign_positive() { + quote! { + ::#prefix::f64::INFINITY + } + } else { + quote! { + ::#prefix::f64::NEG_INFINITY + } + }); + } + + warn!("Unknown non-finite float number: {:?}", f); + Err(()) + } + + pub(crate) fn arguments_from_signature( + signature: &FunctionSig, + ctx: &BindgenContext, + ) -> Vec { + let mut unnamed_arguments = 0; + signature + .argument_types() + .iter() + .map(|&(ref name, _ty)| match *name { + Some(ref name) => { + let name = ctx.rust_ident(name); + quote! 
{ #name } + } + None => { + unnamed_arguments += 1; + let name = + ctx.rust_ident(format!("arg{}", unnamed_arguments)); + quote! { #name } + } + }) + .collect() + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_debug.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_debug.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_debug.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_debug.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,245 @@ +use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods}; +use crate::ir::context::BindgenContext; +use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName}; +use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; + +pub(crate) fn gen_debug_impl( + ctx: &BindgenContext, + fields: &[Field], + item: &Item, + kind: CompKind, +) -> proc_macro2::TokenStream { + let struct_name = item.canonical_name(ctx); + let mut format_string = format!("{} {{{{ ", struct_name); + let mut tokens = vec![]; + + if item.is_opaque(ctx, &()) { + format_string.push_str("opaque"); + } else { + match kind { + CompKind::Union => { + format_string.push_str("union"); + } + CompKind::Struct => { + let processed_fields = fields.iter().filter_map(|f| match f { + Field::DataMember(ref fd) => fd.impl_debug(ctx, ()), + Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()), + }); + + for (i, (fstring, toks)) in processed_fields.enumerate() { + if i > 0 { + format_string.push_str(", "); + } + tokens.extend(toks); + format_string.push_str(&fstring); + } + } + } + } + + format_string.push_str(" }}"); + tokens.insert(0, quote! { #format_string }); + + let prefix = ctx.trait_prefix(); + + quote! 
{ + fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result { + write!(f, #( #tokens ),*) + } + } +} + +/// A trait for the things which we can codegen tokens that contribute towards a +/// generated `impl Debug`. +pub(crate) trait ImplDebug<'a> { + /// Any extra parameter required by this a particular `ImplDebug` implementation. + type Extra; + + /// Generate a format string snippet to be included in the larger `impl Debug` + /// format string, and the code to get the format string's interpolation values. + fn impl_debug( + &self, + ctx: &BindgenContext, + extra: Self::Extra, + ) -> Option<(String, Vec)>; +} + +impl<'a> ImplDebug<'a> for FieldData { + type Extra = (); + + fn impl_debug( + &self, + ctx: &BindgenContext, + _: Self::Extra, + ) -> Option<(String, Vec)> { + if let Some(name) = self.name() { + ctx.resolve_item(self.ty()).impl_debug(ctx, name) + } else { + None + } + } +} + +impl<'a> ImplDebug<'a> for BitfieldUnit { + type Extra = (); + + fn impl_debug( + &self, + ctx: &BindgenContext, + _: Self::Extra, + ) -> Option<(String, Vec)> { + let mut format_string = String::new(); + let mut tokens = vec![]; + for (i, bitfield) in self.bitfields().iter().enumerate() { + if i > 0 { + format_string.push_str(", "); + } + + if let Some(bitfield_name) = bitfield.name() { + format_string.push_str(&format!("{} : {{:?}}", bitfield_name)); + let getter_name = bitfield.getter_name(); + let name_ident = ctx.rust_ident_raw(getter_name); + tokens.push(quote! { + self.#name_ident () + }); + } + } + + Some((format_string, tokens)) + } +} + +impl<'a> ImplDebug<'a> for Item { + type Extra = &'a str; + + fn impl_debug( + &self, + ctx: &BindgenContext, + name: &str, + ) -> Option<(String, Vec)> { + let name_ident = ctx.rust_ident(name); + + // We don't know if blocklisted items `impl Debug` or not, so we can't + // add them to the format string we're building up. 
+ if !ctx.allowlisted_items().contains(&self.id()) { + return None; + } + + let ty = match self.as_type() { + Some(ty) => ty, + None => { + return None; + } + }; + + fn debug_print( + name: &str, + name_ident: proc_macro2::TokenStream, + ) -> Option<(String, Vec)> { + Some(( + format!("{}: {{:?}}", name), + vec![quote! { + self.#name_ident + }], + )) + } + + match *ty.kind() { + // Handle the simple cases. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::Comp(..) | + TypeKind::ObjCSel => debug_print(name, quote! { #name_ident }), + + TypeKind::TemplateInstantiation(ref inst) => { + if inst.is_opaque(ctx, self) { + Some((format!("{}: opaque", name), vec![])) + } else { + debug_print(name, quote! { #name_ident }) + } + } + + // The generic is not required to implement Debug, so we can not debug print that type + TypeKind::TypeParam => { + Some((format!("{}: Non-debuggable generic", name), vec![])) + } + + TypeKind::Array(_, len) => { + // Generics are not required to implement Debug + if self.has_type_param_in_array(ctx) { + Some(( + format!("{}: Array with length {}", name, len), + vec![], + )) + } else if len < RUST_DERIVE_IN_ARRAY_LIMIT || + ctx.options().rust_features().larger_arrays + { + // The simple case + debug_print(name, quote! { #name_ident }) + } else if ctx.options().use_core { + // There is no String in core; reducing field visibility to avoid breaking + // no_std setups. + Some((format!("{}: [...]", name), vec![])) + } else { + // Let's implement our own print function + Some(( + format!("{}: [{{}}]", name), + vec![quote! 
{ + self.#name_ident + .iter() + .enumerate() + .map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v)) + .collect::() + }], + )) + } + } + TypeKind::Vector(_, len) => { + if ctx.options().use_core { + // There is no format! in core; reducing field visibility to avoid breaking + // no_std setups. + Some((format!("{}(...)", name), vec![])) + } else { + let self_ids = 0..len; + Some(( + format!("{}({{}})", name), + vec![quote! { + #(format!("{:?}", self.#self_ids)),* + }], + )) + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + // We follow the aliases + ctx.resolve_item(t).impl_debug(ctx, name) + } + + TypeKind::Pointer(inner) => { + let inner_type = ctx.resolve_type(inner).canonical_type(ctx); + match *inner_type.kind() { + TypeKind::Function(ref sig) + if !sig.function_pointers_can_derive() => + { + Some((format!("{}: FunctionPointer", name), vec![])) + } + _ => debug_print(name, quote! { #name_ident }), + } + } + + TypeKind::Opaque => None, + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_partialeq.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_partialeq.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_partialeq.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/impl_partialeq.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,142 @@ +use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods}; +use crate::ir::context::BindgenContext; +use crate::ir::item::{IsOpaque, Item}; +use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; + +/// Generate a manual implementation of `PartialEq` trait for the +/// specified compound type. 
+pub(crate) fn gen_partialeq_impl( + ctx: &BindgenContext, + comp_info: &CompInfo, + item: &Item, + ty_for_impl: &proc_macro2::TokenStream, +) -> Option { + let mut tokens = vec![]; + + if item.is_opaque(ctx, &()) { + tokens.push(quote! { + &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..] + }); + } else if comp_info.kind() == CompKind::Union { + assert!(!ctx.options().untagged_union); + tokens.push(quote! { + &self.bindgen_union_field[..] == &other.bindgen_union_field[..] + }); + } else { + for base in comp_info.base_members().iter() { + if !base.requires_storage(ctx) { + continue; + } + + let ty_item = ctx.resolve_item(base.ty); + let field_name = &base.field_name; + + if ty_item.is_opaque(ctx, &()) { + let field_name = ctx.rust_ident(field_name); + tokens.push(quote! { + &self. #field_name [..] == &other. #field_name [..] + }); + } else { + tokens.push(gen_field(ctx, ty_item, field_name)); + } + } + + for field in comp_info.fields() { + match *field { + Field::DataMember(ref fd) => { + let ty_item = ctx.resolve_item(fd.ty()); + let name = fd.name().unwrap(); + tokens.push(gen_field(ctx, ty_item, name)); + } + Field::Bitfields(ref bu) => { + for bitfield in bu.bitfields() { + if bitfield.name().is_some() { + let getter_name = bitfield.getter_name(); + let name_ident = ctx.rust_ident_raw(getter_name); + tokens.push(quote! { + self.#name_ident () == other.#name_ident () + }); + } + } + } + } + } + } + + Some(quote! { + fn eq(&self, other: & #ty_for_impl) -> bool { + #( #tokens )&&* + } + }) +} + +fn gen_field( + ctx: &BindgenContext, + ty_item: &Item, + name: &str, +) -> proc_macro2::TokenStream { + fn quote_equals( + name_ident: proc_macro2::Ident, + ) -> proc_macro2::TokenStream { + quote! { self.#name_ident == other.#name_ident } + } + + let name_ident = ctx.rust_ident(name); + let ty = ty_item.expect_type(); + + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Complex(..) | + TypeKind::Float(..) 
| + TypeKind::Enum(..) | + TypeKind::TypeParam | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::Reference(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::Comp(..) | + TypeKind::Pointer(_) | + TypeKind::Function(..) | + TypeKind::Opaque => quote_equals(name_ident), + + TypeKind::TemplateInstantiation(ref inst) => { + if inst.is_opaque(ctx, ty_item) { + quote! { + &self. #name_ident [..] == &other. #name_ident [..] + } + } else { + quote_equals(name_ident) + } + } + + TypeKind::Array(_, len) => { + if len <= RUST_DERIVE_IN_ARRAY_LIMIT || + ctx.options().rust_features().larger_arrays + { + quote_equals(name_ident) + } else { + quote! { + &self. #name_ident [..] == &other. #name_ident [..] + } + } + } + TypeKind::Vector(_, len) => { + let self_ids = 0..len; + let other_ids = 0..len; + quote! { + #(self.#self_ids == other.#other_ids &&)* true + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + let inner_item = ctx.resolve_item(t); + gen_field(ctx, inner_item, name) + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/mod.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,5366 @@ +mod dyngen; +mod error; +mod helpers; +mod impl_debug; +mod impl_partialeq; +mod postprocessing; +mod serialize; +pub(crate) mod struct_layout; + +#[cfg(test)] +#[allow(warnings)] +pub(crate) mod bitfield_unit; +#[cfg(all(test, target_endian = "little"))] +mod bitfield_unit_tests; + +use self::dyngen::DynamicItems; +use self::helpers::attributes; +use self::struct_layout::StructLayoutTracker; + +use super::BindgenOptions; + +use crate::callbacks::{DeriveInfo, TypeKind as DeriveTypeKind}; 
+use crate::ir::analysis::{HasVtable, Sizedness}; +use crate::ir::annotations::{ + Annotations, FieldAccessorKind, FieldVisibilityKind, +}; +use crate::ir::comp::{ + Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods, + Method, MethodKind, +}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::derive::{ + CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use crate::ir::dot; +use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue}; +use crate::ir::function::{ + Abi, ClangAbi, Function, FunctionKind, FunctionSig, Linkage, +}; +use crate::ir::int::IntKind; +use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; +use crate::ir::item_kind::ItemKind; +use crate::ir::layout::Layout; +use crate::ir::module::Module; +use crate::ir::objc::{ObjCInterface, ObjCMethod}; +use crate::ir::template::{ + AsTemplateParam, TemplateInstantiation, TemplateParameters, +}; +use crate::ir::ty::{Type, TypeKind}; +use crate::ir::var::Var; + +use proc_macro2::{self, Ident, Span}; +use quote::TokenStreamExt; + +use crate::{Entry, HashMap, HashSet}; +use std::borrow::Cow; +use std::cell::Cell; +use std::collections::VecDeque; +use std::fmt::{self, Write}; +use std::ops; +use std::str::FromStr; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum CodegenError { + Serialize { msg: String, loc: String }, + Io(String), +} + +impl From for CodegenError { + fn from(err: std::io::Error) -> Self { + Self::Io(err.to_string()) + } +} + +impl fmt::Display for CodegenError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Serialize { msg, loc } => { + write!(f, "serialization error at {}: {}", loc, msg) + } + Self::Io(err) => err.fmt(f), + } + } +} + +// Name of type defined in constified enum module +pub(crate) static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type"; + +fn top_level_path( + ctx: 
&BindgenContext, + item: &Item, +) -> Vec { + let mut path = vec![quote! { self }]; + + if ctx.options().enable_cxx_namespaces { + for _ in 0..item.codegen_depth(ctx) { + path.push(quote! { super }); + } + } + + path +} + +fn root_import( + ctx: &BindgenContext, + module: &Item, +) -> proc_macro2::TokenStream { + assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); + assert!(module.is_module()); + + let mut path = top_level_path(ctx, module); + + let root = ctx.root_module().canonical_name(ctx); + let root_ident = ctx.rust_ident(root); + path.push(quote! { #root_ident }); + + let mut tokens = quote! {}; + tokens.append_separated(path, quote!(::)); + + quote! { + #[allow(unused_imports)] + use #tokens ; + } +} + +bitflags! { + struct DerivableTraits: u16 { + const DEBUG = 1 << 0; + const DEFAULT = 1 << 1; + const COPY = 1 << 2; + const CLONE = 1 << 3; + const HASH = 1 << 4; + const PARTIAL_ORD = 1 << 5; + const ORD = 1 << 6; + const PARTIAL_EQ = 1 << 7; + const EQ = 1 << 8; + } +} + +fn derives_of_item( + item: &Item, + ctx: &BindgenContext, + packed: bool, +) -> DerivableTraits { + let mut derivable_traits = DerivableTraits::empty(); + + let all_template_params = item.all_template_params(ctx); + + if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { + derivable_traits |= DerivableTraits::COPY; + + if ctx.options().rust_features().builtin_clone_impls || + !all_template_params.is_empty() + { + // FIXME: This requires extra logic if you have a big array in a + // templated struct. The reason for this is that the magic: + // fn clone(&self) -> Self { *self } + // doesn't work for templates. + // + // It's not hard to fix though. + derivable_traits |= DerivableTraits::CLONE; + } + } else if packed { + // If the struct or union is packed, deriving from Copy is required for + // deriving from any other trait. 
+ return derivable_traits; + } + + if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { + derivable_traits |= DerivableTraits::DEBUG; + } + + if item.can_derive_default(ctx) && !item.annotations().disallow_default() { + derivable_traits |= DerivableTraits::DEFAULT; + } + + if item.can_derive_hash(ctx) { + derivable_traits |= DerivableTraits::HASH; + } + + if item.can_derive_partialord(ctx) { + derivable_traits |= DerivableTraits::PARTIAL_ORD; + } + + if item.can_derive_ord(ctx) { + derivable_traits |= DerivableTraits::ORD; + } + + if item.can_derive_partialeq(ctx) { + derivable_traits |= DerivableTraits::PARTIAL_EQ; + } + + if item.can_derive_eq(ctx) { + derivable_traits |= DerivableTraits::EQ; + } + + derivable_traits +} + +impl From for Vec<&'static str> { + fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { + [ + (DerivableTraits::DEBUG, "Debug"), + (DerivableTraits::DEFAULT, "Default"), + (DerivableTraits::COPY, "Copy"), + (DerivableTraits::CLONE, "Clone"), + (DerivableTraits::HASH, "Hash"), + (DerivableTraits::PARTIAL_ORD, "PartialOrd"), + (DerivableTraits::ORD, "Ord"), + (DerivableTraits::PARTIAL_EQ, "PartialEq"), + (DerivableTraits::EQ, "Eq"), + ] + .iter() + .filter_map(|&(flag, derive)| { + Some(derive).filter(|_| derivable_traits.contains(flag)) + }) + .collect() + } +} + +struct CodegenResult<'a> { + items: Vec, + dynamic_items: DynamicItems, + + /// A monotonic counter used to add stable unique ID's to stuff that doesn't + /// need to be referenced by anything. + codegen_id: &'a Cell, + + /// Whether a bindgen union has been generated at least once. + saw_bindgen_union: bool, + + /// Whether an incomplete array has been generated at least once. + saw_incomplete_array: bool, + + /// Whether Objective C types have been seen at least once. + saw_objc: bool, + + /// Whether Apple block types have been seen at least once. + saw_block: bool, + + /// Whether a bitfield allocation unit has been seen at least once. 
+ saw_bitfield_unit: bool, + + items_seen: HashSet, + /// The set of generated function/var names, needed because in C/C++ is + /// legal to do something like: + /// + /// ```c++ + /// extern "C" { + /// void foo(); + /// extern int bar; + /// } + /// + /// extern "C" { + /// void foo(); + /// extern int bar; + /// } + /// ``` + /// + /// Being these two different declarations. + functions_seen: HashSet, + vars_seen: HashSet, + + /// Used for making bindings to overloaded functions. Maps from a canonical + /// function name to the number of overloads we have already codegen'd for + /// that name. This lets us give each overload a unique suffix. + overload_counters: HashMap, + + items_to_serialize: Vec, +} + +impl<'a> CodegenResult<'a> { + fn new(codegen_id: &'a Cell) -> Self { + CodegenResult { + items: vec![], + dynamic_items: DynamicItems::new(), + saw_bindgen_union: false, + saw_incomplete_array: false, + saw_objc: false, + saw_block: false, + saw_bitfield_unit: false, + codegen_id, + items_seen: Default::default(), + functions_seen: Default::default(), + vars_seen: Default::default(), + overload_counters: Default::default(), + items_to_serialize: Default::default(), + } + } + + fn dynamic_items(&mut self) -> &mut DynamicItems { + &mut self.dynamic_items + } + + fn saw_bindgen_union(&mut self) { + self.saw_bindgen_union = true; + } + + fn saw_incomplete_array(&mut self) { + self.saw_incomplete_array = true; + } + + fn saw_objc(&mut self) { + self.saw_objc = true; + } + + fn saw_block(&mut self) { + self.saw_block = true; + } + + fn saw_bitfield_unit(&mut self) { + self.saw_bitfield_unit = true; + } + + fn seen>(&self, item: Id) -> bool { + self.items_seen.contains(&item.into()) + } + + fn set_seen>(&mut self, item: Id) { + self.items_seen.insert(item.into()); + } + + fn seen_function(&self, name: &str) -> bool { + self.functions_seen.contains(name) + } + + fn saw_function(&mut self, name: &str) { + self.functions_seen.insert(name.into()); + } + + /// Get the 
overload number for the given function name. Increments the + /// counter internally so the next time we ask for the overload for this + /// name, we get the incremented value, and so on. + fn overload_number(&mut self, name: &str) -> u32 { + let counter = self.overload_counters.entry(name.into()).or_insert(0); + let number = *counter; + *counter += 1; + number + } + + fn seen_var(&self, name: &str) -> bool { + self.vars_seen.contains(name) + } + + fn saw_var(&mut self, name: &str) { + self.vars_seen.insert(name.into()); + } + + fn inner(&mut self, cb: F) -> Vec + where + F: FnOnce(&mut Self), + { + let mut new = Self::new(self.codegen_id); + + cb(&mut new); + + self.saw_incomplete_array |= new.saw_incomplete_array; + self.saw_objc |= new.saw_objc; + self.saw_block |= new.saw_block; + self.saw_bitfield_unit |= new.saw_bitfield_unit; + self.saw_bindgen_union |= new.saw_bindgen_union; + + new.items + } +} + +impl<'a> ops::Deref for CodegenResult<'a> { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.items + } +} + +impl<'a> ops::DerefMut for CodegenResult<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.items + } +} + +/// A trait to convert a rust type into a pointer, optionally const, to the same +/// type. +trait ToPtr { + fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream; +} + +impl ToPtr for proc_macro2::TokenStream { + fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream { + if is_const { + quote! { *const #self } + } else { + quote! { *mut #self } + } + } +} + +/// An extension trait for `proc_macro2::TokenStream` that lets us append any implicit +/// template parameters that exist for some type, if necessary. 
+trait AppendImplicitTemplateParams { + fn append_implicit_template_params( + &mut self, + ctx: &BindgenContext, + item: &Item, + ); +} + +impl AppendImplicitTemplateParams for proc_macro2::TokenStream { + fn append_implicit_template_params( + &mut self, + ctx: &BindgenContext, + item: &Item, + ) { + let item = item.id().into_resolver().through_type_refs().resolve(ctx); + + match *item.expect_type().kind() { + TypeKind::UnresolvedTypeRef(..) => { + unreachable!("already resolved unresolved type refs") + } + TypeKind::ResolvedTypeRef(..) => { + unreachable!("we resolved item through type refs") + } + + // None of these types ever have implicit template parameters. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Pointer(..) | + TypeKind::Reference(..) | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::TemplateInstantiation(..) => return, + _ => {} + } + + let params: Vec<_> = item + .used_template_params(ctx) + .iter() + .map(|p| { + p.try_to_rust_ty(ctx, &()) + .expect("template params cannot fail to be a rust type") + }) + .collect(); + if !params.is_empty() { + self.append_all(quote! { + < #( #params ),* > + }); + } + } +} + +trait CodeGenerator { + /// Extra information from the caller. + type Extra; + + /// Extra information returned to the caller. 
+ type Return; + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + extra: &Self::Extra, + ) -> Self::Return; +} + +impl Item { + fn process_before_codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult, + ) -> bool { + if !self.is_enabled_for_codegen(ctx) { + return false; + } + + if self.is_blocklisted(ctx) || result.seen(self.id()) { + debug!( + "::process_before_codegen: Ignoring hidden or seen: \ + self = {:?}", + self + ); + return false; + } + + if !ctx.codegen_items().contains(&self.id()) { + // TODO(emilio, #453): Figure out what to do when this happens + // legitimately, we could track the opaque stuff and disable the + // assertion there I guess. + warn!("Found non-allowlisted item in code generation: {:?}", self); + } + + result.set_seen(self.id()); + true + } +} + +impl CodeGenerator for Item { + type Extra = (); + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + _extra: &(), + ) { + debug!("::codegen: self = {:?}", self); + if !self.process_before_codegen(ctx, result) { + return; + } + + match *self.kind() { + ItemKind::Module(ref module) => { + module.codegen(ctx, result, self); + } + ItemKind::Function(ref fun) => { + fun.codegen(ctx, result, self); + } + ItemKind::Var(ref var) => { + var.codegen(ctx, result, self); + } + ItemKind::Type(ref ty) => { + ty.codegen(ctx, result, self); + } + } + } +} + +impl CodeGenerator for Module { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {:?}", item); + + let codegen_self = |result: &mut CodegenResult, + found_any: &mut bool| { + for child in self.children() { + if ctx.codegen_items().contains(child) { + *found_any = true; + ctx.resolve_item(*child).codegen(ctx, result, &()); + } + } + + if item.id() == ctx.root_module() { + if result.saw_block { + utils::prepend_block_header(ctx, &mut 
*result); + } + if result.saw_bindgen_union { + utils::prepend_union_types(ctx, &mut *result); + } + if result.saw_incomplete_array { + utils::prepend_incomplete_array_types(ctx, &mut *result); + } + if ctx.need_bindgen_complex_type() { + utils::prepend_complex_type(&mut *result); + } + if result.saw_objc { + utils::prepend_objc_header(ctx, &mut *result); + } + if result.saw_bitfield_unit { + utils::prepend_bitfield_unit_type(ctx, &mut *result); + } + } + }; + + if !ctx.options().enable_cxx_namespaces || + (self.is_inline() && + !ctx.options().conservative_inline_namespaces) + { + codegen_self(result, &mut false); + return; + } + + let mut found_any = false; + let inner_items = result.inner(|result| { + result.push(root_import(ctx, item)); + + let path = item.namespace_aware_canonical_path(ctx).join("::"); + if let Some(raw_lines) = ctx.options().module_lines.get(&path) { + for raw_line in raw_lines { + found_any = true; + result.push( + proc_macro2::TokenStream::from_str(raw_line).unwrap(), + ); + } + } + + codegen_self(result, &mut found_any); + }); + + // Don't bother creating an empty module. + if !found_any { + return; + } + + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(name); + result.push(if item.id() == ctx.root_module() { + quote! { + #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] + pub mod #ident { + #( #inner_items )* + } + } + } else { + quote! 
{ + pub mod #ident { + #( #inner_items )* + } + } + }); + } +} + +impl CodeGenerator for Var { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + use crate::ir::var::VarType; + debug!("::codegen: item = {:?}", item); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let canonical_name = item.canonical_name(ctx); + + if result.seen_var(&canonical_name) { + return; + } + result.saw_var(&canonical_name); + + let canonical_ident = ctx.rust_ident(&canonical_name); + + // We can't generate bindings to static variables of templates. The + // number of actual variables for a single declaration are open ended + // and we don't know what instantiations do or don't exist. + if !item.all_template_params(ctx).is_empty() { + return; + } + + let mut attrs = vec![]; + if let Some(comment) = item.comment(ctx) { + attrs.push(attributes::doc(comment)); + } + + let ty = self.ty().to_rust_ty_or_opaque(ctx, &()); + + if let Some(val) = self.val() { + match *val { + VarType::Bool(val) => { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #val ; + }); + } + VarType::Int(val) => { + let int_kind = self + .ty() + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .expect_type() + .as_integer() + .unwrap(); + let val = if int_kind.is_signed() { + helpers::ast_ty::int_expr(val) + } else { + helpers::ast_ty::uint_expr(val as _) + }; + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #val ; + }); + } + VarType::String(ref bytes) => { + // Account the trailing zero. + // + // TODO: Here we ignore the type we just made up, probably + // we should refactor how the variable type and ty ID work. + let len = bytes.len() + 1; + let ty = quote! 
{ + [u8; #len] + }; + + match String::from_utf8(bytes.clone()) { + Ok(string) => { + let cstr = helpers::ast_ty::cstr_expr(string); + if ctx + .options() + .rust_features + .static_lifetime_elision + { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : &#ty = #cstr ; + }); + } else { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : &'static #ty = #cstr ; + }); + } + } + Err(..) => { + let bytes = helpers::ast_ty::byte_array_expr(bytes); + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #bytes ; + }); + } + } + } + VarType::Float(f) => { + if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #expr ; + }); + } + } + VarType::Char(c) => { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #c ; + }); + } + } + } else { + // If necessary, apply a `#[link_name]` attribute + if let Some(link_name) = self.link_name() { + attrs.push(attributes::link_name::(link_name)); + } else { + let link_name = + self.mangled_name().unwrap_or_else(|| self.name()); + if !utils::names_will_be_identical_after_mangling( + &canonical_name, + link_name, + None, + ) { + attrs.push(attributes::link_name::(link_name)); + } + } + + let maybe_mut = if self.is_const() { + quote! {} + } else { + quote! { mut } + }; + + let tokens = quote!( + extern "C" { + #(#attrs)* + pub static #maybe_mut #canonical_ident: #ty; + } + ); + + result.push(tokens); + } + } +} + +impl CodeGenerator for Type { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {:?}", item); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + match *self.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Pointer(..) 
| + TypeKind::Reference(..) | + TypeKind::Function(..) | + TypeKind::ResolvedTypeRef(..) | + TypeKind::Opaque | + TypeKind::TypeParam => { + // These items don't need code generation, they only need to be + // converted to rust types in fields, arguments, and such. + // NOTE(emilio): If you add to this list, make sure to also add + // it to BindgenContext::compute_allowlisted_and_codegen_items. + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.codegen(ctx, result, item) + } + TypeKind::BlockPointer(inner) => { + if !ctx.options().generate_block { + return; + } + + let inner_item = + inner.into_resolver().through_type_refs().resolve(ctx); + let name = item.canonical_name(ctx); + + let inner_rust_type = { + if let TypeKind::Function(fnsig) = + inner_item.kind().expect_type().kind() + { + utils::fnsig_block(ctx, fnsig) + } else { + panic!("invalid block typedef: {:?}", inner_item) + } + }; + + let rust_name = ctx.rust_ident(name); + + let mut tokens = if let Some(comment) = item.comment(ctx) { + attributes::doc(comment) + } else { + quote! {} + }; + + tokens.append_all(quote! { + pub type #rust_name = #inner_rust_type ; + }); + + result.push(tokens); + result.saw_block(); + } + TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), + TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { + let inner_item = + inner.into_resolver().through_type_refs().resolve(ctx); + let name = item.canonical_name(ctx); + let path = item.canonical_path(ctx); + + { + let through_type_aliases = inner + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(ctx); + + // Try to catch the common pattern: + // + // typedef struct foo { ... } foo; + // + // here, and also other more complex cases like #946. + if through_type_aliases.canonical_path(ctx) == path { + return; + } + } + + // If this is a known named type, disallow generating anything + // for it too. 
If size_t -> usize conversions are enabled, we + // need to check that these conversions are permissible, but + // nothing needs to be generated, still. + let spelling = self.name().expect("Unnamed alias?"); + if utils::type_from_named(ctx, spelling).is_some() { + if let "size_t" | "ssize_t" = spelling { + let layout = inner_item + .kind() + .expect_type() + .layout(ctx) + .expect("No layout?"); + assert_eq!( + layout.size, + ctx.target_pointer_size(), + "Target platform requires `--no-size_t-is-usize`. The size of `{}` ({}) does not match the target pointer size ({})", + spelling, + layout.size, + ctx.target_pointer_size(), + ); + assert_eq!( + layout.align, + ctx.target_pointer_size(), + "Target platform requires `--no-size_t-is-usize`. The alignment of `{}` ({}) does not match the target pointer size ({})", + spelling, + layout.align, + ctx.target_pointer_size(), + ); + } + return; + } + + let mut outer_params = item.used_template_params(ctx); + + let is_opaque = item.is_opaque(ctx, &()); + let inner_rust_type = if is_opaque { + outer_params = vec![]; + self.to_opaque(ctx, item) + } else { + // Its possible that we have better layout information than + // the inner type does, so fall back to an opaque blob based + // on our layout if converting the inner item fails. + let mut inner_ty = inner_item + .try_to_rust_ty_or_opaque(ctx, &()) + .unwrap_or_else(|_| self.to_opaque(ctx, item)); + inner_ty.append_implicit_template_params(ctx, inner_item); + inner_ty + }; + + { + // FIXME(emilio): This is a workaround to avoid generating + // incorrect type aliases because of types that we haven't + // been able to resolve (because, eg, they depend on a + // template parameter). + // + // It's kind of a shame not generating them even when they + // could be referenced, but we already do the same for items + // with invalid template parameters, and at least this way + // they can be replaced, instead of generating plain invalid + // code. 
+ let inner_canon_type = + inner_item.expect_type().canonical_type(ctx); + if inner_canon_type.is_invalid_type_param() { + warn!( + "Item contained invalid named type, skipping: \ + {:?}, {:?}", + item, inner_item + ); + return; + } + } + + let rust_name = ctx.rust_ident(&name); + + let mut tokens = if let Some(comment) = item.comment(ctx) { + attributes::doc(comment) + } else { + quote! {} + }; + + let alias_style = if ctx.options().type_alias.matches(&name) { + AliasVariation::TypeAlias + } else if ctx.options().new_type_alias.matches(&name) { + AliasVariation::NewType + } else if ctx.options().new_type_alias_deref.matches(&name) { + AliasVariation::NewTypeDeref + } else { + ctx.options().default_alias_style + }; + + // We prefer using `pub use` over `pub type` because of: + // https://github.com/rust-lang/rust/issues/26264 + // These are the only characters allowed in simple + // paths, eg `good::dogs::Bront`. + if inner_rust_type.to_string().chars().all(|c| matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ')) && outer_params.is_empty() && + !is_opaque && + alias_style == AliasVariation::TypeAlias && + inner_item.expect_type().canonical_type(ctx).is_enum() + { + tokens.append_all(quote! { + pub use + }); + let path = top_level_path(ctx, item); + tokens.append_separated(path, quote!(::)); + tokens.append_all(quote! { + :: #inner_rust_type as #rust_name ; + }); + result.push(tokens); + return; + } + + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + pub type #rust_name + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + assert!( + ctx.options().rust_features().repr_transparent, + "repr_transparent feature is required to use {:?}", + alias_style + ); + + let mut attributes = + vec![attributes::repr("transparent")]; + let packed = false; // Types can't be packed in Rust. 
+ let derivable_traits = + derives_of_item(item, ctx, packed); + if !derivable_traits.is_empty() { + let derives: Vec<_> = derivable_traits.into(); + attributes.push(attributes::derives(&derives)) + } + + quote! { + #( #attributes )* + pub struct #rust_name + } + } + }); + + let params: Vec<_> = outer_params + .into_iter() + .filter_map(|p| p.as_template_param(ctx, &())) + .collect(); + if params + .iter() + .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) + { + warn!( + "Item contained invalid template \ + parameter: {:?}", + item + ); + return; + } + let params: Vec<_> = params + .iter() + .map(|p| { + p.try_to_rust_ty(ctx, &()).expect( + "type parameters can always convert to rust ty OK", + ) + }) + .collect(); + + if !params.is_empty() { + tokens.append_all(quote! { + < #( #params ),* > + }); + } + + let access_spec = + access_specifier(ctx.options().default_visibility); + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + = #inner_rust_type ; + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + quote! { + (#access_spec #inner_rust_type) ; + } + } + }); + + if alias_style == AliasVariation::NewTypeDeref { + let prefix = ctx.trait_prefix(); + tokens.append_all(quote! { + impl ::#prefix::ops::Deref for #rust_name { + type Target = #inner_rust_type; + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::#prefix::ops::DerefMut for #rust_name { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }); + } + + result.push(tokens); + } + TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), + TypeKind::ObjCId | TypeKind::ObjCSel => { + result.saw_objc(); + } + TypeKind::ObjCInterface(ref interface) => { + interface.codegen(ctx, result, item) + } + ref u @ TypeKind::UnresolvedTypeRef(..) 
=> { + unreachable!("Should have been resolved after parsing {:?}!", u) + } + } + } +} + +struct Vtable<'a> { + item_id: ItemId, + /// A reference to the originating compound object. + #[allow(dead_code)] + comp_info: &'a CompInfo, +} + +impl<'a> Vtable<'a> { + fn new(item_id: ItemId, comp_info: &'a CompInfo) -> Self { + Vtable { item_id, comp_info } + } +} + +impl<'a> CodeGenerator for Vtable<'a> { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + assert_eq!(item.id(), self.item_id); + debug_assert!(item.is_enabled_for_codegen(ctx)); + let name = ctx.rust_ident(self.canonical_name(ctx)); + + // For now, we will only generate vtables for classes that: + // - do not inherit from others (compilers merge VTable from primary parent class). + // - do not contain a virtual destructor (requires ordering; platforms generate different vtables). + if ctx.options().vtable_generation && + self.comp_info.base_members().is_empty() && + self.comp_info.destructor().is_none() + { + let class_ident = ctx.rust_ident(self.item_id.canonical_name(ctx)); + + let methods = self + .comp_info + .methods() + .iter() + .filter_map(|m| { + if !m.is_virtual() { + return None; + } + + let function_item = ctx.resolve_item(m.signature()); + let function = function_item.expect_function(); + let signature_item = ctx.resolve_item(function.signature()); + let signature = match signature_item.expect_type().kind() { + TypeKind::Function(ref sig) => sig, + _ => panic!("Function signature type mismatch"), + }; + + // FIXME: Is there a canonical name without the class prepended? + let function_name = function_item.canonical_name(ctx); + + // FIXME: Need to account for overloading with times_seen (separately from regular function path). 
+ let function_name = ctx.rust_ident(function_name); + let mut args = utils::fnsig_arguments(ctx, signature); + let ret = utils::fnsig_return_ty(ctx, signature); + + args[0] = if m.is_const() { + quote! { this: *const #class_ident } + } else { + quote! { this: *mut #class_ident } + }; + + Some(quote! { + pub #function_name : unsafe extern "C" fn( #( #args ),* ) #ret + }) + }) + .collect::>(); + + result.push(quote! { + #[repr(C)] + pub struct #name { + #( #methods ),* + } + }) + } else { + // For the cases we don't support, simply generate an empty struct. + let void = helpers::ast_ty::c_void(ctx); + + result.push(quote! { + #[repr(C)] + pub struct #name ( #void ); + }); + } + } +} + +impl<'a> ItemCanonicalName for Vtable<'a> { + fn canonical_name(&self, ctx: &BindgenContext) -> String { + format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx)) + } +} + +impl<'a> TryToRustTy for Vtable<'a> { + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + let name = ctx.rust_ident(self.canonical_name(ctx)); + Ok(quote! { + #name + }) + } +} + +impl CodeGenerator for TemplateInstantiation { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug_assert!(item.is_enabled_for_codegen(ctx)); + + // Although uses of instantiations don't need code generation, and are + // just converted to rust types in fields, vars, etc, we take this + // opportunity to generate tests for their layout here. If the + // instantiation is opaque, then its presumably because we don't + // properly understand it (maybe because of specializations), and so we + // shouldn't emit layout tests either. 
+ if !ctx.options().layout_tests || self.is_opaque(ctx, item) { + return; + } + + // If there are any unbound type parameters, then we can't generate a + // layout test because we aren't dealing with a concrete type with a + // concrete size and alignment. + if ctx.uses_any_template_parameters(item.id()) { + return; + } + + let layout = item.kind().expect_type().layout(ctx); + + if let Some(layout) = layout { + let size = layout.size; + let align = layout.align; + + let name = item.full_disambiguated_name(ctx); + let mut fn_name = + format!("__bindgen_test_layout_{}_instantiation", name); + let times_seen = result.overload_number(&fn_name); + if times_seen > 0 { + write!(&mut fn_name, "_{}", times_seen).unwrap(); + } + + let fn_name = ctx.rust_ident_raw(fn_name); + + let prefix = ctx.trait_prefix(); + let ident = item.to_rust_ty_or_opaque(ctx, &()); + let size_of_expr = quote! { + ::#prefix::mem::size_of::<#ident>() + }; + let align_of_expr = quote! { + ::#prefix::mem::align_of::<#ident>() + }; + + let item = quote! { + #[test] + fn #fn_name() { + assert_eq!(#size_of_expr, #size, + concat!("Size of template specialization: ", + stringify!(#ident))); + assert_eq!(#align_of_expr, #align, + concat!("Alignment of template specialization: ", + stringify!(#ident))); + } + }; + + result.push(item); + } + } +} + +/// Trait for implementing the code generation of a struct or union field. 
+trait FieldCodegen<'a> { + type Extra; + + #[allow(clippy::too_many_arguments)] + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + extra: Self::Extra, + ) where + F: Extend, + M: Extend; +} + +impl<'a> FieldCodegen<'a> for Field { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + match *self { + Field::DataMember(ref data) => { + data.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + result, + struct_layout, + fields, + methods, + (), + ); + } + Field::Bitfields(ref unit) => { + unit.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + result, + struct_layout, + fields, + methods, + (), + ); + } + } + } +} + +fn wrap_union_field_if_needed( + ctx: &BindgenContext, + struct_layout: &StructLayoutTracker, + ty: proc_macro2::TokenStream, + result: &mut CodegenResult, +) -> proc_macro2::TokenStream { + if struct_layout.is_rust_union() { + if struct_layout.can_copy_union_fields() { + ty + } else { + let prefix = ctx.trait_prefix(); + quote! { + ::#prefix::mem::ManuallyDrop<#ty> + } + } + } else { + result.saw_bindgen_union(); + if ctx.options().enable_cxx_namespaces { + quote! { + root::__BindgenUnionField<#ty> + } + } else { + quote! 
{ + __BindgenUnionField<#ty> + } + } + } +} + +impl<'a> FieldCodegen<'a> for FieldData { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + parent_visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + // Bitfields are handled by `FieldCodegen` implementations for + // `BitfieldUnit` and `Bitfield`. + assert!(self.bitfield_width().is_none()); + + let field_item = + self.ty().into_resolver().through_type_refs().resolve(ctx); + let field_ty = field_item.expect_type(); + let mut ty = self.ty().to_rust_ty_or_opaque(ctx, &()); + ty.append_implicit_template_params(ctx, field_item); + + // NB: If supported, we use proper `union` types. + let ty = if parent.is_union() { + wrap_union_field_if_needed(ctx, struct_layout, ty, result) + } else if let Some(item) = field_ty.is_incomplete_array(ctx) { + result.saw_incomplete_array(); + + let inner = item.to_rust_ty_or_opaque(ctx, &()); + + if ctx.options().enable_cxx_namespaces { + quote! { + root::__IncompleteArrayField<#inner> + } + } else { + quote! { + __IncompleteArrayField<#inner> + } + } + } else { + ty + }; + + let mut field = quote! 
{}; + if ctx.options().generate_comments { + if let Some(raw_comment) = self.comment() { + let comment = ctx.options().process_comment(raw_comment); + field = attributes::doc(comment); + } + } + + let field_name = self + .name() + .map(|name| ctx.rust_mangle(name).into_owned()) + .expect("Each field should have a name in codegen!"); + let field_ident = ctx.rust_ident_raw(field_name.as_str()); + + if let Some(padding_field) = + struct_layout.saw_field(&field_name, field_ty, self.offset()) + { + fields.extend(Some(padding_field)); + } + + let visibility = compute_visibility( + ctx, + self.is_public(), + Some(self.annotations()), + parent_visibility_kind, + ); + let accessor_kind = + self.annotations().accessor_kind().unwrap_or(accessor_kind); + + match visibility { + FieldVisibilityKind::Private => { + field.append_all(quote! { + #field_ident : #ty , + }); + } + FieldVisibilityKind::PublicCrate => { + field.append_all(quote! { + pub(crate) #field_ident : #ty , + }); + } + FieldVisibilityKind::Public => { + field.append_all(quote! { + pub #field_ident : #ty , + }); + } + } + + fields.extend(Some(field)); + + // TODO: Factor the following code out, please! + if accessor_kind == FieldAccessorKind::None { + return; + } + + let getter_name = ctx.rust_ident_raw(format!("get_{}", field_name)); + let mutable_getter_name = + ctx.rust_ident_raw(format!("get_{}_mut", field_name)); + let field_name = ctx.rust_ident_raw(field_name); + + methods.extend(Some(match accessor_kind { + FieldAccessorKind::None => unreachable!(), + FieldAccessorKind::Regular => { + quote! { + #[inline] + pub fn #getter_name(&self) -> & #ty { + &self.#field_name + } + + #[inline] + pub fn #mutable_getter_name(&mut self) -> &mut #ty { + &mut self.#field_name + } + } + } + FieldAccessorKind::Unsafe => { + quote! 
{ + #[inline] + pub unsafe fn #getter_name(&self) -> & #ty { + &self.#field_name + } + + #[inline] + pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty { + &mut self.#field_name + } + } + } + FieldAccessorKind::Immutable => { + quote! { + #[inline] + pub fn #getter_name(&self) -> & #ty { + &self.#field_name + } + } + } + })); + } +} + +impl BitfieldUnit { + /// Get the constructor name for this bitfield unit. + fn ctor_name(&self) -> proc_macro2::TokenStream { + let ctor_name = Ident::new( + &format!("new_bitfield_{}", self.nth()), + Span::call_site(), + ); + quote! { + #ctor_name + } + } +} + +impl Bitfield { + /// Extend an under construction bitfield unit constructor with this + /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` + /// variable that's being constructed. + fn extend_ctor_impl( + &self, + ctx: &BindgenContext, + param_name: proc_macro2::TokenStream, + mut ctor_impl: proc_macro2::TokenStream, + ) -> proc_macro2::TokenStream { + let bitfield_ty = ctx.resolve_type(self.ty()); + let bitfield_ty_layout = bitfield_ty + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + let bitfield_int_ty = helpers::integer_type(ctx, bitfield_ty_layout) + .expect( + "Should already have verified that the bitfield is \ + representable as an int", + ); + + let offset = self.offset_into_unit(); + let width = self.width() as u8; + let prefix = ctx.trait_prefix(); + + ctor_impl.append_all(quote! { + __bindgen_bitfield_unit.set( + #offset, + #width, + { + let #param_name: #bitfield_int_ty = unsafe { + ::#prefix::mem::transmute(#param_name) + }; + #param_name as u64 + } + ); + }); + + ctor_impl + } +} + +fn access_specifier( + visibility: FieldVisibilityKind, +) -> proc_macro2::TokenStream { + match visibility { + FieldVisibilityKind::Private => quote! {}, + FieldVisibilityKind::PublicCrate => quote! { pub(crate) }, + FieldVisibilityKind::Public => quote! 
{ pub }, + } +} + +/// Compute a fields or structs visibility based on multiple conditions. +/// 1. If the element was declared public, and we respect such CXX accesses specs +/// (context option) => By default Public, but this can be overruled by an `annotation`. +/// +/// 2. If the element was declared private, and we respect such CXX accesses specs +/// (context option) => By default Private, but this can be overruled by an `annotation`. +/// +/// 3. If we do not respect visibility modifiers, the result depends on the `annotation`, +/// if any, or the passed `default_kind`. +/// +fn compute_visibility( + ctx: &BindgenContext, + is_declared_public: bool, + annotations: Option<&Annotations>, + default_kind: FieldVisibilityKind, +) -> FieldVisibilityKind { + match ( + is_declared_public, + ctx.options().respect_cxx_access_specs, + annotations.and_then(|e| e.visibility_kind()), + ) { + (true, true, annotated_visibility) => { + // declared as public, cxx specs are respected + annotated_visibility.unwrap_or(FieldVisibilityKind::Public) + } + (false, true, annotated_visibility) => { + // declared as private, cxx specs are respected + annotated_visibility.unwrap_or(FieldVisibilityKind::Private) + } + (_, false, annotated_visibility) => { + // cxx specs are not respected, declaration does not matter. 
+ annotated_visibility.unwrap_or(default_kind) + } + } +} + +impl<'a> FieldCodegen<'a> for BitfieldUnit { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; + + result.saw_bitfield_unit(); + + let layout = self.layout(); + let unit_field_ty = helpers::bitfield_unit(ctx, layout); + let field_ty = { + let unit_field_ty = unit_field_ty.clone(); + if parent.is_union() { + wrap_union_field_if_needed( + ctx, + struct_layout, + unit_field_ty, + result, + ) + } else { + unit_field_ty + } + }; + + { + let align_field_name = format!("_bitfield_align_{}", self.nth()); + let align_field_ident = ctx.rust_ident(align_field_name); + let align_ty = match self.layout().align { + n if n >= 8 => quote! { u64 }, + 4 => quote! { u32 }, + 2 => quote! { u16 }, + _ => quote! { u8 }, + }; + let access_spec = access_specifier(visibility_kind); + let align_field = quote! { + #access_spec #align_field_ident: [#align_ty; 0], + }; + fields.extend(Some(align_field)); + } + + let unit_field_name = format!("_bitfield_{}", self.nth()); + let unit_field_ident = ctx.rust_ident(&unit_field_name); + + let ctor_name = self.ctor_name(); + let mut ctor_params = vec![]; + let mut ctor_impl = quote! {}; + + // We cannot generate any constructor if the underlying storage can't + // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default. + // + // We don't check `larger_arrays` here because Default does still have + // the 32 items limitation. 
+ let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; + + let mut all_fields_declared_as_public = true; + for bf in self.bitfields() { + // Codegen not allowed for anonymous bitfields + if bf.name().is_none() { + continue; + } + + if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT && + !ctx.options().rust_features().larger_arrays + { + continue; + } + + all_fields_declared_as_public &= bf.is_public(); + let mut bitfield_representable_as_int = true; + bf.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + result, + struct_layout, + fields, + methods, + (&unit_field_name, &mut bitfield_representable_as_int), + ); + + // Generating a constructor requires the bitfield to be representable as an integer. + if !bitfield_representable_as_int { + generate_ctor = false; + continue; + } + + let param_name = bitfield_getter_name(ctx, bf); + let bitfield_ty_item = ctx.resolve_item(bf.ty()); + let bitfield_ty = bitfield_ty_item.expect_type(); + let bitfield_ty = + bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); + + ctor_params.push(quote! { + #param_name : #bitfield_ty + }); + ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl); + } + + let visibility_kind = compute_visibility( + ctx, + all_fields_declared_as_public, + None, + visibility_kind, + ); + let access_spec = access_specifier(visibility_kind); + + let field = quote! { + #access_spec #unit_field_ident : #field_ty , + }; + fields.extend(Some(field)); + + if generate_ctor { + methods.extend(Some(quote! { + #[inline] + #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty { + let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default(); + #ctor_impl + __bindgen_bitfield_unit + } + })); + } + + struct_layout.saw_bitfield_unit(layout); + } +} + +fn bitfield_getter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let name = bitfield.getter_name(); + let name = ctx.rust_ident_raw(name); + quote! 
{ #name } +} + +fn bitfield_setter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let setter = bitfield.setter_name(); + let setter = ctx.rust_ident_raw(setter); + quote! { #setter } +} + +impl<'a> FieldCodegen<'a> for Bitfield { + type Extra = (&'a str, &'a mut bool); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + _accessor_kind: FieldAccessorKind, + parent: &CompInfo, + _result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + _fields: &mut F, + methods: &mut M, + (unit_field_name, bitfield_representable_as_int): (&'a str, &mut bool), + ) where + F: Extend, + M: Extend, + { + let prefix = ctx.trait_prefix(); + let getter_name = bitfield_getter_name(ctx, self); + let setter_name = bitfield_setter_name(ctx, self); + let unit_field_ident = Ident::new(unit_field_name, Span::call_site()); + + let bitfield_ty_item = ctx.resolve_item(self.ty()); + let bitfield_ty = bitfield_ty_item.expect_type(); + + let bitfield_ty_layout = bitfield_ty + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + let bitfield_int_ty = + match helpers::integer_type(ctx, bitfield_ty_layout) { + Some(int_ty) => { + *bitfield_representable_as_int = true; + int_ty + } + None => { + *bitfield_representable_as_int = false; + return; + } + }; + + let bitfield_ty = + bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); + + let offset = self.offset_into_unit(); + let width = self.width() as u8; + + let visibility_kind = compute_visibility( + ctx, + self.is_public(), + Some(self.annotations()), + visibility_kind, + ); + let access_spec = access_specifier(visibility_kind); + + if parent.is_union() && !struct_layout.is_rust_union() { + methods.extend(Some(quote! 
{ + #[inline] + #access_spec fn #getter_name(&self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute( + self.#unit_field_ident.as_ref().get(#offset, #width) + as #bitfield_int_ty + ) + } + } + + #[inline] + #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + self.#unit_field_ident.as_mut().set( + #offset, + #width, + val as u64 + ) + } + } + })); + } else { + methods.extend(Some(quote! { + #[inline] + #access_spec fn #getter_name(&self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute( + self.#unit_field_ident.get(#offset, #width) + as #bitfield_int_ty + ) + } + } + + #[inline] + #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + self.#unit_field_ident.set( + #offset, + #width, + val as u64 + ) + } + } + })); + } + } +} + +impl CodeGenerator for CompInfo { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {:?}", item); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + // Don't output classes with template parameters that aren't types, and + // also don't output template specializations, neither total or partial. + if self.has_non_type_template_params() { + return; + } + + let ty = item.expect_type(); + let layout = ty.layout(ctx); + let mut packed = self.is_packed(ctx, layout.as_ref()); + + let canonical_name = item.canonical_name(ctx); + let canonical_ident = ctx.rust_ident(&canonical_name); + + // Generate the vtable from the method list if appropriate. + // + // TODO: I don't know how this could play with virtual methods that are + // not in the list of methods found by us, we'll see. Also, could the + // order of the vtable pointers vary? 
+ // + // FIXME: Once we generate proper vtables, we need to codegen the + // vtable, but *not* generate a field for it in the case that + // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true. + // + // Also, we need to generate the vtable in such a way it "inherits" from + // the parent too. + let is_opaque = item.is_opaque(ctx, &()); + let mut fields = vec![]; + let mut struct_layout = + StructLayoutTracker::new(ctx, self, ty, &canonical_name); + + if !is_opaque { + if item.has_vtable_ptr(ctx) { + let vtable = Vtable::new(item.id(), self); + vtable.codegen(ctx, result, item); + + let vtable_type = vtable + .try_to_rust_ty(ctx, &()) + .expect("vtable to Rust type conversion is infallible") + .to_ptr(true); + + fields.push(quote! { + pub vtable_: #vtable_type , + }); + + struct_layout.saw_vtable(); + } + + for base in self.base_members() { + if !base.requires_storage(ctx) { + continue; + } + + let inner_item = ctx.resolve_item(base.ty); + let mut inner = inner_item.to_rust_ty_or_opaque(ctx, &()); + inner.append_implicit_template_params(ctx, inner_item); + let field_name = ctx.rust_ident(&base.field_name); + + struct_layout.saw_base(inner_item.expect_type()); + + let visibility = match ( + base.is_public(), + ctx.options().respect_cxx_access_specs, + ) { + (true, true) => FieldVisibilityKind::Public, + (false, true) => FieldVisibilityKind::Private, + _ => ctx.options().default_visibility, + }; + + let access_spec = access_specifier(visibility); + fields.push(quote! 
{ + #access_spec #field_name: #inner, + }); + } + } + + let mut methods = vec![]; + if !is_opaque { + let visibility = item + .annotations() + .visibility_kind() + .unwrap_or(ctx.options().default_visibility); + let struct_accessor_kind = item + .annotations() + .accessor_kind() + .unwrap_or(FieldAccessorKind::None); + for field in self.fields() { + field.codegen( + ctx, + visibility, + struct_accessor_kind, + self, + result, + &mut struct_layout, + &mut fields, + &mut methods, + (), + ); + } + // Check whether an explicit padding field is needed + // at the end. + if let Some(comp_layout) = layout { + fields.extend( + struct_layout + .add_tail_padding(&canonical_name, comp_layout), + ); + } + } + + if is_opaque { + // Opaque item should not have generated methods, fields. + debug_assert!(fields.is_empty()); + debug_assert!(methods.is_empty()); + } + + let is_union = self.kind() == CompKind::Union; + let layout = item.kind().expect_type().layout(ctx); + let zero_sized = item.is_zero_sized(ctx); + let forward_decl = self.is_forward_declaration(); + + let mut explicit_align = None; + + // C++ requires every struct to be addressable, so what C++ compilers do + // is making the struct 1-byte sized. + // + // This is apparently not the case for C, see: + // https://github.com/rust-lang/rust-bindgen/issues/551 + // + // Just get the layout, and assume C++ if not. + // + // NOTE: This check is conveniently here to avoid the dummy fields we + // may add for unused template parameters. + if !forward_decl && zero_sized { + let has_address = if is_opaque { + // Generate the address field if it's an opaque type and + // couldn't determine the layout of the blob. + layout.is_none() + } else { + layout.map_or(true, |l| l.size != 0) + }; + + if has_address { + let layout = Layout::new(1, 1); + let ty = helpers::blob(ctx, Layout::new(1, 1)); + struct_layout.saw_field_with_layout( + "_address", + layout, + /* offset = */ Some(0), + ); + fields.push(quote! 
{ + pub _address: #ty, + }); + } + } + + if is_opaque { + match layout { + Some(l) => { + explicit_align = Some(l.align); + + let ty = helpers::blob(ctx, l); + fields.push(quote! { + pub _bindgen_opaque_blob: #ty , + }); + } + None => { + warn!("Opaque type without layout! Expect dragons!"); + } + } + } else if !is_union && !zero_sized { + if let Some(padding_field) = + layout.and_then(|layout| struct_layout.pad_struct(layout)) + { + fields.push(padding_field); + } + + if let Some(layout) = layout { + if struct_layout.requires_explicit_align(layout) { + if layout.align == 1 { + packed = true; + } else { + explicit_align = Some(layout.align); + if !ctx.options().rust_features.repr_align { + let ty = helpers::blob( + ctx, + Layout::new(0, layout.align), + ); + fields.push(quote! { + pub __bindgen_align: #ty , + }); + } + } + } + } + } else if is_union && !forward_decl { + // TODO(emilio): It'd be nice to unify this with the struct path + // above somehow. + let layout = layout.expect("Unable to get layout information?"); + if struct_layout.requires_explicit_align(layout) { + explicit_align = Some(layout.align); + } + + if !struct_layout.is_rust_union() { + let ty = helpers::blob(ctx, layout); + fields.push(quote! { + pub bindgen_union_field: #ty , + }) + } + } + + if forward_decl { + fields.push(quote! { + _unused: [u8; 0], + }); + } + + let mut generic_param_names = vec![]; + + for (idx, ty) in item.used_template_params(ctx).iter().enumerate() { + let param = ctx.resolve_type(*ty); + let name = param.name().unwrap(); + let ident = ctx.rust_ident(name); + generic_param_names.push(ident.clone()); + + let prefix = ctx.trait_prefix(); + let field_name = ctx.rust_ident(format!("_phantom_{}", idx)); + fields.push(quote! { + pub #field_name : ::#prefix::marker::PhantomData< + ::#prefix::cell::UnsafeCell<#ident> + > , + }); + } + + let generics = if !generic_param_names.is_empty() { + let generic_param_names = generic_param_names.clone(); + quote! 
{ + < #( #generic_param_names ),* > + } + } else { + quote! {} + }; + + let mut attributes = vec![]; + let mut needs_clone_impl = false; + let mut needs_default_impl = false; + let mut needs_debug_impl = false; + let mut needs_partialeq_impl = false; + if let Some(comment) = item.comment(ctx) { + attributes.push(attributes::doc(comment)); + } + if packed && !is_opaque { + let n = layout.map_or(1, |l| l.align); + assert!(ctx.options().rust_features().repr_packed_n || n == 1); + let packed_repr = if n == 1 { + "packed".to_string() + } else { + format!("packed({})", n) + }; + attributes.push(attributes::repr_list(&["C", &packed_repr])); + } else { + attributes.push(attributes::repr("C")); + } + + if ctx.options().rust_features().repr_align { + if let Some(explicit) = explicit_align { + // Ensure that the struct has the correct alignment even in + // presence of alignas. + let explicit = helpers::ast_ty::int_expr(explicit as i64); + attributes.push(quote! { + #[repr(align(#explicit))] + }); + } + } + + let derivable_traits = derives_of_item(item, ctx, packed); + if !derivable_traits.contains(DerivableTraits::DEBUG) { + needs_debug_impl = ctx.options().derive_debug && + ctx.options().impl_debug && + !ctx.no_debug_by_name(item) && + !item.annotations().disallow_debug(); + } + + if !derivable_traits.contains(DerivableTraits::DEFAULT) { + needs_default_impl = ctx.options().derive_default && + !self.is_forward_declaration() && + !ctx.no_default_by_name(item) && + !item.annotations().disallow_default(); + } + + let all_template_params = item.all_template_params(ctx); + + if derivable_traits.contains(DerivableTraits::COPY) && + !derivable_traits.contains(DerivableTraits::CLONE) + { + needs_clone_impl = true; + } + + if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) { + needs_partialeq_impl = ctx.options().derive_partialeq && + ctx.options().impl_partialeq && + ctx.lookup_can_derive_partialeq_or_partialord(item.id()) == + CanDerive::Manually; + } + + let mut derives: 
Vec<_> = derivable_traits.into(); + derives.extend(item.annotations().derives().iter().map(String::as_str)); + + let is_rust_union = is_union && struct_layout.is_rust_union(); + + // The custom derives callback may return a list of derive attributes; + // add them to the end of the list. + let custom_derives = ctx.options().all_callbacks(|cb| { + cb.add_derives(&DeriveInfo { + name: &canonical_name, + kind: if is_rust_union { + DeriveTypeKind::Union + } else { + DeriveTypeKind::Struct + }, + }) + }); + // In most cases this will be a no-op, since custom_derives will be empty. + derives.extend(custom_derives.iter().map(|s| s.as_str())); + + if !derives.is_empty() { + attributes.push(attributes::derives(&derives)) + } + + if item.must_use(ctx) { + attributes.push(attributes::must_use()); + } + + let mut tokens = if is_rust_union { + quote! { + #( #attributes )* + pub union #canonical_ident + } + } else { + quote! { + #( #attributes )* + pub struct #canonical_ident + } + }; + + tokens.append_all(quote! { + #generics { + #( #fields )* + } + }); + result.push(tokens); + + // Generate the inner types and all that stuff. + // + // TODO: In the future we might want to be smart, and use nested + // modules, and whatnot. + for ty in self.inner_types() { + let child_item = ctx.resolve_item(*ty); + // assert_eq!(child_item.parent_id(), item.id()); + child_item.codegen(ctx, result, &()); + } + + // NOTE: Some unexposed attributes (like alignment attributes) may + // affect layout, so we're bad and pray to the gods for avoid sending + // all the tests to shit when parsing things like max_align_t. 
+ if self.found_unknown_attr() { + warn!( + "Type {} has an unknown attribute that may affect layout", + canonical_ident + ); + } + + if all_template_params.is_empty() { + if !is_opaque { + for var in self.inner_vars() { + ctx.resolve_item(*var).codegen(ctx, result, &()); + } + } + + if ctx.options().layout_tests && !self.is_forward_declaration() { + if let Some(layout) = layout { + let fn_name = + format!("bindgen_test_layout_{}", canonical_ident); + let fn_name = ctx.rust_ident_raw(fn_name); + let prefix = ctx.trait_prefix(); + let size_of_expr = quote! { + ::#prefix::mem::size_of::<#canonical_ident>() + }; + let align_of_expr = quote! { + ::#prefix::mem::align_of::<#canonical_ident>() + }; + let size = layout.size; + let align = layout.align; + + let check_struct_align = if align > + ctx.target_pointer_size() && + !ctx.options().rust_features().repr_align + { + None + } else { + Some(quote! { + assert_eq!(#align_of_expr, + #align, + concat!("Alignment of ", stringify!(#canonical_ident))); + + }) + }; + + let should_skip_field_offset_checks = is_opaque; + + let check_field_offset = if should_skip_field_offset_checks + { + vec![] + } else { + self.fields() + .iter() + .filter_map(|field| match *field { + Field::DataMember(ref f) if f.name().is_some() => Some(f), + _ => None, + }) + .flat_map(|field| { + let name = field.name().unwrap(); + field.offset().map(|offset| { + let field_offset = offset / 8; + let field_name = ctx.rust_ident(name); + quote! { + assert_eq!( + unsafe { + ::#prefix::ptr::addr_of!((*ptr).#field_name) as usize - ptr as usize + }, + #field_offset, + concat!("Offset of field: ", stringify!(#canonical_ident), "::", stringify!(#field_name)) + ); + } + }) + }) + .collect() + }; + + let uninit_decl = if !check_field_offset.is_empty() { + // FIXME: When MSRV >= 1.59.0, we can use + // > const PTR: *const #canonical_ident = ::#prefix::mem::MaybeUninit::uninit().as_ptr(); + Some(quote! 
{ + // Use a shared MaybeUninit so that rustc with + // opt-level=0 doesn't take too much stack space, + // see #2218. + const UNINIT: ::#prefix::mem::MaybeUninit<#canonical_ident> = ::#prefix::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); + }) + } else { + None + }; + + let item = quote! { + #[test] + fn #fn_name() { + #uninit_decl + assert_eq!(#size_of_expr, + #size, + concat!("Size of: ", stringify!(#canonical_ident))); + #check_struct_align + #( #check_field_offset )* + } + }; + result.push(item); + } + } + + let mut method_names = Default::default(); + if ctx.options().codegen_config.methods() { + for method in self.methods() { + assert!(method.kind() != MethodKind::Constructor); + method.codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + ); + } + } + + if ctx.options().codegen_config.constructors() { + for sig in self.constructors() { + Method::new( + MethodKind::Constructor, + *sig, + /* const */ + false, + ) + .codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + ); + } + } + + if ctx.options().codegen_config.destructors() { + if let Some((kind, destructor)) = self.destructor() { + debug_assert!(kind.is_destructor()); + Method::new(kind, destructor, false).codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + ); + } + } + } + + // NB: We can't use to_rust_ty here since for opaque types this tries to + // use the specialization knowledge to generate a blob field. + let ty_for_impl = quote! { + #canonical_ident #generics + }; + + if needs_clone_impl { + result.push(quote! { + impl #generics Clone for #ty_for_impl { + fn clone(&self) -> Self { *self } + } + }); + } + + if needs_default_impl { + let prefix = ctx.trait_prefix(); + let body = if ctx.options().rust_features().maybe_uninit { + quote! { + let mut s = ::#prefix::mem::MaybeUninit::::uninit(); + unsafe { + ::#prefix::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } + } else { + quote! 
{ + unsafe { + let mut s: Self = ::#prefix::mem::uninitialized(); + ::#prefix::ptr::write_bytes(&mut s, 0, 1); + s + } + } + }; + // Note we use `ptr::write_bytes()` instead of `mem::zeroed()` because the latter does + // not necessarily ensure padding bytes are zeroed. Some C libraries are sensitive to + // non-zero padding bytes, especially when forwards/backwards compatability is + // involved. + result.push(quote! { + impl #generics Default for #ty_for_impl { + fn default() -> Self { + #body + } + } + }); + } + + if needs_debug_impl { + let impl_ = impl_debug::gen_debug_impl( + ctx, + self.fields(), + item, + self.kind(), + ); + + let prefix = ctx.trait_prefix(); + + result.push(quote! { + impl #generics ::#prefix::fmt::Debug for #ty_for_impl { + #impl_ + } + }); + } + + if needs_partialeq_impl { + if let Some(impl_) = impl_partialeq::gen_partialeq_impl( + ctx, + self, + item, + &ty_for_impl, + ) { + let partialeq_bounds = if !generic_param_names.is_empty() { + let bounds = generic_param_names.iter().map(|t| { + quote! { #t: PartialEq } + }); + quote! { where #( #bounds ),* } + } else { + quote! {} + }; + + let prefix = ctx.trait_prefix(); + result.push(quote! { + impl #generics ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds { + #impl_ + } + }); + } + } + + if !methods.is_empty() { + result.push(quote! { + impl #generics #ty_for_impl { + #( #methods )* + } + }); + } + } +} + +impl Method { + fn codegen_method( + &self, + ctx: &BindgenContext, + methods: &mut Vec, + method_names: &mut HashSet, + result: &mut CodegenResult<'_>, + _parent: &CompInfo, + ) { + assert!({ + let cc = &ctx.options().codegen_config; + match self.kind() { + MethodKind::Constructor => cc.constructors(), + MethodKind::Destructor => cc.destructors(), + MethodKind::VirtualDestructor { .. } => cc.destructors(), + MethodKind::Static | + MethodKind::Normal | + MethodKind::Virtual { .. } => cc.methods(), + } + }); + + // TODO(emilio): We could generate final stuff at least. 
+ if self.is_virtual() { + return; // FIXME + } + + // First of all, output the actual function. + let function_item = ctx.resolve_item(self.signature()); + if !function_item.process_before_codegen(ctx, result) { + return; + } + let function = function_item.expect_function(); + let times_seen = function.codegen(ctx, result, function_item); + let times_seen = match times_seen { + Some(seen) => seen, + None => return, + }; + let signature_item = ctx.resolve_item(function.signature()); + let mut name = match self.kind() { + MethodKind::Constructor => "new".into(), + MethodKind::Destructor => "destruct".into(), + _ => function.name().to_owned(), + }; + + let signature = match *signature_item.expect_type().kind() { + TypeKind::Function(ref sig) => sig, + _ => panic!("How in the world?"), + }; + + let supported_abi = match signature.abi(ctx, Some(&*name)) { + ClangAbi::Known(Abi::ThisCall) => { + ctx.options().rust_features().thiscall_abi + } + ClangAbi::Known(Abi::Vectorcall) => { + ctx.options().rust_features().vectorcall_abi + } + ClangAbi::Known(Abi::CUnwind) => { + ctx.options().rust_features().c_unwind_abi + } + ClangAbi::Known(Abi::EfiApi) => { + ctx.options().rust_features().abi_efiapi + } + _ => true, + }; + + if !supported_abi { + return; + } + + // Do not generate variadic methods, since rust does not allow + // implementing them, and we don't do a good job at it anyway. 
+ if signature.is_variadic() { + return; + } + + if method_names.contains(&name) { + let mut count = 1; + let mut new_name; + + while { + new_name = format!("{}{}", name, count); + method_names.contains(&new_name) + } { + count += 1; + } + + name = new_name; + } + + method_names.insert(name.clone()); + + let mut function_name = function_item.canonical_name(ctx); + if times_seen > 0 { + write!(&mut function_name, "{}", times_seen).unwrap(); + } + let function_name = ctx.rust_ident(function_name); + let mut args = utils::fnsig_arguments(ctx, signature); + let mut ret = utils::fnsig_return_ty(ctx, signature); + + if !self.is_static() && !self.is_constructor() { + args[0] = if self.is_const() { + quote! { &self } + } else { + quote! { &mut self } + }; + } + + // If it's a constructor, we always return `Self`, and we inject the + // "this" parameter, so there's no need to ask the user for it. + // + // Note that constructors in Clang are represented as functions with + // return-type = void. + if self.is_constructor() { + args.remove(0); + ret = quote! { -> Self }; + } + + let mut exprs = + helpers::ast_ty::arguments_from_signature(signature, ctx); + + let mut stmts = vec![]; + + // If it's a constructor, we need to insert an extra parameter with a + // variable called `__bindgen_tmp` we're going to create. + if self.is_constructor() { + let prefix = ctx.trait_prefix(); + let tmp_variable_decl = if ctx + .options() + .rust_features() + .maybe_uninit + { + exprs[0] = quote! { + __bindgen_tmp.as_mut_ptr() + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() + } + } else { + exprs[0] = quote! { + &mut __bindgen_tmp + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::uninitialized() + } + }; + stmts.push(tmp_variable_decl); + } else if !self.is_static() { + assert!(!exprs.is_empty()); + exprs[0] = quote! { + self + }; + }; + + let call = quote! 
{ + #function_name (#( #exprs ),* ) + }; + + stmts.push(call); + + if self.is_constructor() { + stmts.push(if ctx.options().rust_features().maybe_uninit { + quote! { + __bindgen_tmp.assume_init() + } + } else { + quote! { + __bindgen_tmp + } + }) + } + + let block = ctx.wrap_unsafe_ops(quote! ( #( #stmts );*)); + + let mut attrs = vec![attributes::inline()]; + + if signature.must_use() && + ctx.options().rust_features().must_use_function + { + attrs.push(attributes::must_use()); + } + + let name = ctx.rust_ident(&name); + methods.push(quote! { + #(#attrs)* + pub unsafe fn #name ( #( #args ),* ) #ret { + #block + } + }); + } +} + +/// A helper type that represents different enum variations. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum EnumVariation { + /// The code for this enum will use a Rust enum. Note that creating this in unsafe code + /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not + /// its marked as non_exhaustive. + Rust { + /// Indicates whether the generated struct should be `#[non_exhaustive]` + non_exhaustive: bool, + }, + /// The code for this enum will use a newtype + NewType { + /// Indicates whether the newtype will have bitwise operators + is_bitfield: bool, + /// Indicates whether the variants will be represented as global constants + is_global: bool, + }, + /// The code for this enum will use consts + Consts, + /// The code for this enum will use a module containing consts + ModuleConsts, +} + +impl EnumVariation { + fn is_rust(&self) -> bool { + matches!(*self, EnumVariation::Rust { .. }) + } + + /// Both the `Const` and `ModuleConsts` variants will cause this to return + /// true. 
+ fn is_const(&self) -> bool { + matches!(*self, EnumVariation::Consts | EnumVariation::ModuleConsts) + } +} + +impl Default for EnumVariation { + fn default() -> EnumVariation { + EnumVariation::Consts + } +} + +impl fmt::Display for EnumVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Rust { + non_exhaustive: false, + } => "rust", + Self::Rust { + non_exhaustive: true, + } => "rust_non_exhaustive", + Self::NewType { + is_bitfield: true, .. + } => "bitfield", + Self::NewType { + is_bitfield: false, + is_global, + } => { + if *is_global { + "newtype_global" + } else { + "newtype" + } + } + Self::Consts => "consts", + Self::ModuleConsts => "moduleconsts", + }; + s.fmt(f) + } +} + +impl std::str::FromStr for EnumVariation { + type Err = std::io::Error; + + /// Create a `EnumVariation` from a string. + fn from_str(s: &str) -> Result { + match s { + "rust" => Ok(EnumVariation::Rust { + non_exhaustive: false, + }), + "rust_non_exhaustive" => Ok(EnumVariation::Rust { + non_exhaustive: true, + }), + "bitfield" => Ok(EnumVariation::NewType { + is_bitfield: true, + is_global: false, + }), + "consts" => Ok(EnumVariation::Consts), + "moduleconsts" => Ok(EnumVariation::ModuleConsts), + "newtype" => Ok(EnumVariation::NewType { + is_bitfield: false, + is_global: false, + }), + "newtype_global" => Ok(EnumVariation::NewType { + is_bitfield: false, + is_global: true, + }), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid EnumVariation. Accepted values ", + "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", + "'moduleconsts', 'newtype' and 'newtype_global'." + ), + )), + } + } +} + +/// A helper type to construct different enum variations. 
+enum EnumBuilder<'a> { + Rust { + attrs: Vec, + ident: Ident, + tokens: proc_macro2::TokenStream, + emitted_any_variants: bool, + }, + NewType { + canonical_name: &'a str, + tokens: proc_macro2::TokenStream, + is_bitfield: bool, + is_global: bool, + }, + Consts { + variants: Vec, + }, + ModuleConsts { + module_name: &'a str, + module_items: Vec, + }, +} + +impl<'a> EnumBuilder<'a> { + /// Returns true if the builder is for a rustified enum. + fn is_rust_enum(&self) -> bool { + matches!(*self, EnumBuilder::Rust { .. }) + } + + /// Create a new enum given an item builder, a canonical name, a name for + /// the representation, and which variation it should be generated as. + fn new( + name: &'a str, + mut attrs: Vec, + repr: proc_macro2::TokenStream, + enum_variation: EnumVariation, + has_typedef: bool, + ) -> Self { + let ident = Ident::new(name, Span::call_site()); + + match enum_variation { + EnumVariation::NewType { + is_bitfield, + is_global, + } => EnumBuilder::NewType { + canonical_name: name, + tokens: quote! { + #( #attrs )* + pub struct #ident (pub #repr); + }, + is_bitfield, + is_global, + }, + + EnumVariation::Rust { .. } => { + // `repr` is guaranteed to be Rustified in Enum::codegen + attrs.insert(0, quote! { #[repr( #repr )] }); + let tokens = quote!(); + EnumBuilder::Rust { + attrs, + ident, + tokens, + emitted_any_variants: false, + } + } + + EnumVariation::Consts => { + let mut variants = Vec::new(); + + if !has_typedef { + variants.push(quote! { + #( #attrs )* + pub type #ident = #repr; + }); + } + + EnumBuilder::Consts { variants } + } + + EnumVariation::ModuleConsts => { + let ident = Ident::new( + CONSTIFIED_ENUM_MODULE_REPR_NAME, + Span::call_site(), + ); + let type_definition = quote! { + #( #attrs )* + pub type #ident = #repr; + }; + + EnumBuilder::ModuleConsts { + module_name: name, + module_items: vec![type_definition], + } + } + } + } + + /// Add a variant to this enum. 
+ fn with_variant( + self, + ctx: &BindgenContext, + variant: &EnumVariant, + mangling_prefix: Option<&str>, + rust_ty: proc_macro2::TokenStream, + result: &mut CodegenResult<'_>, + is_ty_named: bool, + ) -> Self { + let variant_name = ctx.rust_mangle(variant.name()); + let is_rust_enum = self.is_rust_enum(); + let expr = match variant.val() { + EnumVariantValue::Boolean(v) if is_rust_enum => { + helpers::ast_ty::uint_expr(v as u64) + } + EnumVariantValue::Boolean(v) => quote!(#v), + EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v), + EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v), + }; + + let mut doc = quote! {}; + if ctx.options().generate_comments { + if let Some(raw_comment) = variant.comment() { + let comment = ctx.options().process_comment(raw_comment); + doc = attributes::doc(comment); + } + } + + match self { + EnumBuilder::Rust { + attrs, + ident, + tokens, + emitted_any_variants: _, + } => { + let name = ctx.rust_ident(variant_name); + EnumBuilder::Rust { + attrs, + ident, + tokens: quote! { + #tokens + #doc + #name = #expr, + }, + emitted_any_variants: true, + } + } + + EnumBuilder::NewType { + canonical_name, + is_global, + .. + } => { + if ctx.options().rust_features().associated_const && + is_ty_named && + !is_global + { + let enum_ident = ctx.rust_ident(canonical_name); + let variant_ident = ctx.rust_ident(variant_name); + + result.push(quote! { + impl #enum_ident { + #doc + pub const #variant_ident : #rust_ty = #rust_ty ( #expr ); + } + }); + } else { + let ident = ctx.rust_ident(match mangling_prefix { + Some(prefix) => { + Cow::Owned(format!("{}_{}", prefix, variant_name)) + } + None => variant_name, + }); + result.push(quote! { + #doc + pub const #ident : #rust_ty = #rust_ty ( #expr ); + }); + } + + self + } + + EnumBuilder::Consts { .. 
} => { + let constant_name = match mangling_prefix { + Some(prefix) => { + Cow::Owned(format!("{}_{}", prefix, variant_name)) + } + None => variant_name, + }; + + let ident = ctx.rust_ident(constant_name); + result.push(quote! { + #doc + pub const #ident : #rust_ty = #expr ; + }); + + self + } + EnumBuilder::ModuleConsts { + module_name, + mut module_items, + } => { + let name = ctx.rust_ident(variant_name); + let ty = ctx.rust_ident(CONSTIFIED_ENUM_MODULE_REPR_NAME); + module_items.push(quote! { + #doc + pub const #name : #ty = #expr ; + }); + + EnumBuilder::ModuleConsts { + module_name, + module_items, + } + } + } + } + + fn build( + self, + ctx: &BindgenContext, + rust_ty: proc_macro2::TokenStream, + result: &mut CodegenResult<'_>, + ) -> proc_macro2::TokenStream { + match self { + EnumBuilder::Rust { + attrs, + ident, + tokens, + emitted_any_variants, + .. + } => { + let variants = if !emitted_any_variants { + quote!(__bindgen_cannot_repr_c_on_empty_enum = 0) + } else { + tokens + }; + + quote! { + #( #attrs )* + pub enum #ident { + #variants + } + } + } + EnumBuilder::NewType { + canonical_name, + tokens, + is_bitfield, + .. + } => { + if !is_bitfield { + return tokens; + } + + let rust_ty_name = ctx.rust_ident_raw(canonical_name); + let prefix = ctx.trait_prefix(); + + result.push(quote! { + impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty { + type Output = Self; + + #[inline] + fn bitor(self, other: Self) -> Self { + #rust_ty_name(self.0 | other.0) + } + } + }); + + result.push(quote! { + impl ::#prefix::ops::BitOrAssign for #rust_ty { + #[inline] + fn bitor_assign(&mut self, rhs: #rust_ty) { + self.0 |= rhs.0; + } + } + }); + + result.push(quote! { + impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty { + type Output = Self; + + #[inline] + fn bitand(self, other: Self) -> Self { + #rust_ty_name(self.0 & other.0) + } + } + }); + + result.push(quote! 
{ + impl ::#prefix::ops::BitAndAssign for #rust_ty { + #[inline] + fn bitand_assign(&mut self, rhs: #rust_ty) { + self.0 &= rhs.0; + } + } + }); + + tokens + } + EnumBuilder::Consts { variants, .. } => quote! { #( #variants )* }, + EnumBuilder::ModuleConsts { + module_items, + module_name, + .. + } => { + let ident = ctx.rust_ident(module_name); + quote! { + pub mod #ident { + #( #module_items )* + } + } + } + } + } +} + +impl CodeGenerator for Enum { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {:?}", item); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(&name); + let enum_ty = item.expect_type(); + let layout = enum_ty.layout(ctx); + let variation = self.computed_enum_variation(ctx, item); + + let repr_translated; + let repr = match self.repr().map(|repr| ctx.resolve_type(repr)) { + Some(repr) + if !ctx.options().translate_enum_integer_types && + !variation.is_rust() => + { + repr + } + repr => { + // An enum's integer type is translated to a native Rust + // integer type in 3 cases: + // * the enum is Rustified and we need a translated type for + // the repr attribute + // * the representation couldn't be determined from the C source + // * it was explicitly requested as a bindgen option + + let kind = match repr { + Some(repr) => match *repr.canonical_type(ctx).kind() { + TypeKind::Int(int_kind) => int_kind, + _ => panic!("Unexpected type as enum repr"), + }, + None => { + warn!( + "Guessing type of enum! Forward declarations of enums \ + shouldn't be legal!" 
+ ); + IntKind::Int + } + }; + + let signed = kind.is_signed(); + let size = layout + .map(|l| l.size) + .or_else(|| kind.known_size()) + .unwrap_or(0); + + let translated = match (signed, size) { + (true, 1) => IntKind::I8, + (false, 1) => IntKind::U8, + (true, 2) => IntKind::I16, + (false, 2) => IntKind::U16, + (true, 4) => IntKind::I32, + (false, 4) => IntKind::U32, + (true, 8) => IntKind::I64, + (false, 8) => IntKind::U64, + _ => { + warn!( + "invalid enum decl: signed: {}, size: {}", + signed, size + ); + IntKind::I32 + } + }; + + repr_translated = + Type::new(None, None, TypeKind::Int(translated), false); + &repr_translated + } + }; + + let mut attrs = vec![]; + + // TODO(emilio): Delegate this to the builders? + match variation { + EnumVariation::Rust { non_exhaustive } => { + if non_exhaustive && + ctx.options().rust_features().non_exhaustive + { + attrs.push(attributes::non_exhaustive()); + } else if non_exhaustive && + !ctx.options().rust_features().non_exhaustive + { + panic!("The rust target you're using doesn't seem to support non_exhaustive enums"); + } + } + EnumVariation::NewType { .. } => { + if ctx.options().rust_features.repr_transparent { + attrs.push(attributes::repr("transparent")); + } else { + attrs.push(attributes::repr("C")); + } + } + _ => {} + }; + + if let Some(comment) = item.comment(ctx) { + attrs.push(attributes::doc(comment)); + } + + if item.must_use(ctx) { + attrs.push(attributes::must_use()); + } + + if !variation.is_const() { + let packed = false; // Enums can't be packed in Rust. + let mut derives = derives_of_item(item, ctx, packed); + // For backwards compat, enums always derive + // Clone/Eq/PartialEq/Hash, even if we don't generate those by + // default. 
+ derives.insert( + DerivableTraits::CLONE | + DerivableTraits::HASH | + DerivableTraits::PARTIAL_EQ | + DerivableTraits::EQ, + ); + let mut derives: Vec<_> = derives.into(); + for derive in item.annotations().derives().iter() { + if !derives.contains(&derive.as_str()) { + derives.push(derive); + } + } + + // The custom derives callback may return a list of derive attributes; + // add them to the end of the list. + let custom_derives = ctx.options().all_callbacks(|cb| { + cb.add_derives(&DeriveInfo { + name: &name, + kind: DeriveTypeKind::Enum, + }) + }); + // In most cases this will be a no-op, since custom_derives will be empty. + derives.extend(custom_derives.iter().map(|s| s.as_str())); + + attrs.push(attributes::derives(&derives)); + } + + fn add_constant( + ctx: &BindgenContext, + enum_: &Type, + // Only to avoid recomputing every time. + enum_canonical_name: &Ident, + // May be the same as "variant" if it's because the + // enum is unnamed and we still haven't seen the + // value. + variant_name: &Ident, + referenced_name: &Ident, + enum_rust_ty: proc_macro2::TokenStream, + result: &mut CodegenResult<'_>, + ) { + let constant_name = if enum_.name().is_some() { + if ctx.options().prepend_enum_name { + format!("{}_{}", enum_canonical_name, variant_name) + } else { + format!("{}", variant_name) + } + } else { + format!("{}", variant_name) + }; + let constant_name = ctx.rust_ident(constant_name); + + result.push(quote! { + pub const #constant_name : #enum_rust_ty = + #enum_canonical_name :: #referenced_name ; + }); + } + + let repr = repr.to_rust_ty_or_opaque(ctx, item); + let has_typedef = ctx.is_enum_typedef_combo(item.id()); + + let mut builder = + EnumBuilder::new(&name, attrs, repr, variation, has_typedef); + + // A map where we keep a value -> variant relation. 
+ let mut seen_values = HashMap::<_, Ident>::default(); + let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); + let is_toplevel = item.is_toplevel(ctx); + + // Used to mangle the constants we generate in the unnamed-enum case. + let parent_canonical_name = if is_toplevel { + None + } else { + Some(item.parent_id().canonical_name(ctx)) + }; + + let constant_mangling_prefix = if ctx.options().prepend_enum_name { + if enum_ty.name().is_none() { + parent_canonical_name.as_deref() + } else { + Some(&*name) + } + } else { + None + }; + + // NB: We defer the creation of constified variants, in case we find + // another variant with the same value (which is the common thing to + // do). + let mut constified_variants = VecDeque::new(); + + let mut iter = self.variants().iter().peekable(); + while let Some(variant) = + iter.next().or_else(|| constified_variants.pop_front()) + { + if variant.hidden() { + continue; + } + + if variant.force_constification() && iter.peek().is_some() { + constified_variants.push_back(variant); + continue; + } + + match seen_values.entry(variant.val()) { + Entry::Occupied(ref entry) => { + if variation.is_rust() { + let variant_name = ctx.rust_mangle(variant.name()); + let mangled_name = + if is_toplevel || enum_ty.name().is_some() { + variant_name + } else { + let parent_name = + parent_canonical_name.as_ref().unwrap(); + + Cow::Owned(format!( + "{}_{}", + parent_name, variant_name + )) + }; + + let existing_variant_name = entry.get(); + // Use associated constants for named enums. + if enum_ty.name().is_some() && + ctx.options().rust_features().associated_const + { + let enum_canonical_name = &ident; + let variant_name = + ctx.rust_ident_raw(&*mangled_name); + result.push(quote! 
{ + impl #enum_rust_ty { + pub const #variant_name : #enum_rust_ty = + #enum_canonical_name :: #existing_variant_name ; + } + }); + } else { + add_constant( + ctx, + enum_ty, + &ident, + &Ident::new(&mangled_name, Span::call_site()), + existing_variant_name, + enum_rust_ty.clone(), + result, + ); + } + } else { + builder = builder.with_variant( + ctx, + variant, + constant_mangling_prefix, + enum_rust_ty.clone(), + result, + enum_ty.name().is_some(), + ); + } + } + Entry::Vacant(entry) => { + builder = builder.with_variant( + ctx, + variant, + constant_mangling_prefix, + enum_rust_ty.clone(), + result, + enum_ty.name().is_some(), + ); + + let variant_name = ctx.rust_ident(variant.name()); + + // If it's an unnamed enum, or constification is enforced, + // we also generate a constant so it can be properly + // accessed. + if (variation.is_rust() && enum_ty.name().is_none()) || + variant.force_constification() + { + let mangled_name = if is_toplevel { + variant_name.clone() + } else { + let parent_name = + parent_canonical_name.as_ref().unwrap(); + + Ident::new( + &format!("{}_{}", parent_name, variant_name), + Span::call_site(), + ) + }; + + add_constant( + ctx, + enum_ty, + &ident, + &mangled_name, + &variant_name, + enum_rust_ty.clone(), + result, + ); + } + + entry.insert(variant_name); + } + } + } + + let item = builder.build(ctx, enum_rust_ty, result); + result.push(item); + } +} + +/// Enum for the default type of macro constants. 
+#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum MacroTypeVariation { + /// Use i32 or i64 + Signed, + /// Use u32 or u64 + Unsigned, +} + +impl fmt::Display for MacroTypeVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Signed => "signed", + Self::Unsigned => "unsigned", + }; + s.fmt(f) + } +} + +impl Default for MacroTypeVariation { + fn default() -> MacroTypeVariation { + MacroTypeVariation::Unsigned + } +} + +impl std::str::FromStr for MacroTypeVariation { + type Err = std::io::Error; + + /// Create a `MacroTypeVariation` from a string. + fn from_str(s: &str) -> Result { + match s { + "signed" => Ok(MacroTypeVariation::Signed), + "unsigned" => Ok(MacroTypeVariation::Unsigned), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid MacroTypeVariation. Accepted values ", + "are 'signed' and 'unsigned'" + ), + )), + } + } +} + +/// Enum for how aliases should be translated. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum AliasVariation { + /// Convert to regular Rust alias + TypeAlias, + /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] + NewType, + /// Same as NewStruct but also impl Deref to be able to use the methods of the wrapped type + NewTypeDeref, +} + +impl fmt::Display for AliasVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::TypeAlias => "type_alias", + Self::NewType => "new_type", + Self::NewTypeDeref => "new_type_deref", + }; + + s.fmt(f) + } +} + +impl Default for AliasVariation { + fn default() -> AliasVariation { + AliasVariation::TypeAlias + } +} + +impl std::str::FromStr for AliasVariation { + type Err = std::io::Error; + + /// Create an `AliasVariation` from a string. 
+ fn from_str(s: &str) -> Result { + match s { + "type_alias" => Ok(AliasVariation::TypeAlias), + "new_type" => Ok(AliasVariation::NewType), + "new_type_deref" => Ok(AliasVariation::NewTypeDeref), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid AliasVariation. Accepted values ", + "are 'type_alias', 'new_type', and 'new_type_deref'" + ), + )), + } + } +} + +/// Enum for how non-`Copy` `union`s should be translated. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum NonCopyUnionStyle { + /// Wrap members in a type generated by `bindgen`. + BindgenWrapper, + /// Wrap members in [`::core::mem::ManuallyDrop`]. + /// + /// Note: `ManuallyDrop` was stabilized in Rust 1.20.0, do not use it if your + /// MSRV is lower. + ManuallyDrop, +} + +impl fmt::Display for NonCopyUnionStyle { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::BindgenWrapper => "bindgen_wrapper", + Self::ManuallyDrop => "manually_drop", + }; + + s.fmt(f) + } +} + +impl Default for NonCopyUnionStyle { + fn default() -> Self { + Self::BindgenWrapper + } +} + +impl std::str::FromStr for NonCopyUnionStyle { + type Err = std::io::Error; + + fn from_str(s: &str) -> Result { + match s { + "bindgen_wrapper" => Ok(Self::BindgenWrapper), + "manually_drop" => Ok(Self::ManuallyDrop), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid NonCopyUnionStyle. Accepted values ", + "are 'bindgen_wrapper' and 'manually_drop'" + ), + )), + } + } +} + +/// Fallible conversion to an opaque blob. +/// +/// Implementors of this trait should provide the `try_get_layout` method to +/// fallibly get this thing's layout, which the provided `try_to_opaque` trait +/// method will use to convert the `Layout` into an opaque blob Rust type. +trait TryToOpaque { + type Extra; + + /// Get the layout for this thing, if one is available. 
+ fn try_get_layout( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> error::Result; + + /// Do not override this provided trait method. + fn try_to_opaque( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> error::Result { + self.try_get_layout(ctx, extra) + .map(|layout| helpers::blob(ctx, layout)) + } +} + +/// Infallible conversion of an IR thing to an opaque blob. +/// +/// The resulting layout is best effort, and is unfortunately not guaranteed to +/// be correct. When all else fails, we fall back to a single byte layout as a +/// last resort, because C++ does not permit zero-sized types. See the note in +/// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits +/// and when each is appropriate. +/// +/// Don't implement this directly. Instead implement `TryToOpaque`, and then +/// leverage the blanket impl for this trait. +trait ToOpaque: TryToOpaque { + fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout { + self.try_get_layout(ctx, extra) + .unwrap_or_else(|_| Layout::for_size(ctx, 1)) + } + + fn to_opaque( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> proc_macro2::TokenStream { + let layout = self.get_layout(ctx, extra); + helpers::blob(ctx, layout) + } +} + +impl ToOpaque for T where T: TryToOpaque {} + +/// Fallible conversion from an IR thing to an *equivalent* Rust type. +/// +/// If the C/C++ construct represented by the IR thing cannot (currently) be +/// represented in Rust (for example, instantiations of templates with +/// const-value generic parameters) then the impl should return an `Err`. It +/// should *not* attempt to return an opaque blob with the correct size and +/// alignment. That is the responsibility of the `TryToOpaque` trait. 
+trait TryToRustTy { + type Extra; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> error::Result; +} + +/// Fallible conversion to a Rust type or an opaque blob with the correct size +/// and alignment. +/// +/// Don't implement this directly. Instead implement `TryToRustTy` and +/// `TryToOpaque`, and then leverage the blanket impl for this trait below. +trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque { + type Extra; + + fn try_to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &::Extra, + ) -> error::Result; +} + +impl TryToRustTyOrOpaque for T +where + T: TryToRustTy + TryToOpaque, +{ + type Extra = E; + + fn try_to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &E, + ) -> error::Result { + self.try_to_rust_ty(ctx, extra).or_else(|_| { + if let Ok(layout) = self.try_get_layout(ctx, extra) { + Ok(helpers::blob(ctx, layout)) + } else { + Err(error::Error::NoLayoutForOpaqueBlob) + } + }) + } +} + +/// Infallible conversion to a Rust type, or an opaque blob with a best effort +/// of correct size and alignment. +/// +/// Don't implement this directly. Instead implement `TryToRustTy` and +/// `TryToOpaque`, and then leverage the blanket impl for this trait below. +/// +/// ### Fallible vs. Infallible Conversions to Rust Types +/// +/// When should one use this infallible `ToRustTyOrOpaque` trait versus the +/// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` triats? All fallible trait +/// implementations that need to convert another thing into a Rust type or +/// opaque blob in a nested manner should also use fallible trait methods and +/// propagate failure up the stack. Only infallible functions and methods like +/// CodeGenerator implementations should use the infallible +/// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely +/// we are to get a usable `Layout` even if we can't generate an equivalent Rust +/// type for a C++ construct. 
+trait ToRustTyOrOpaque: TryToRustTy + ToOpaque { + type Extra; + + fn to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &::Extra, + ) -> proc_macro2::TokenStream; +} + +impl ToRustTyOrOpaque for T +where + T: TryToRustTy + ToOpaque, +{ + type Extra = E; + + fn to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &E, + ) -> proc_macro2::TokenStream { + self.try_to_rust_ty(ctx, extra) + .unwrap_or_else(|_| self.to_opaque(ctx, extra)) + } +} + +impl TryToOpaque for T +where + T: Copy + Into, +{ + type Extra = (); + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + ctx.resolve_item((*self).into()).try_get_layout(ctx, &()) + } +} + +impl TryToRustTy for T +where + T: Copy + Into, +{ + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &()) + } +} + +impl TryToOpaque for Item { + type Extra = (); + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + self.kind().expect_type().try_get_layout(ctx, self) + } +} + +impl TryToRustTy for Item { + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + self.kind().expect_type().try_to_rust_ty(ctx, self) + } +} + +impl TryToOpaque for Type { + type Extra = Item; + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &Item, + ) -> error::Result { + self.layout(ctx).ok_or(error::Error::NoLayoutForOpaqueBlob) + } +} + +impl TryToRustTy for Type { + type Extra = Item; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + use self::helpers::ast_ty::*; + + match *self.kind() { + TypeKind::Void => Ok(c_void(ctx)), + // TODO: we should do something smart with nullptr, or maybe *const + // c_void is enough? + TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), + TypeKind::Int(ik) => { + match ik { + IntKind::Bool => Ok(quote! 
{ bool }), + IntKind::Char { .. } => Ok(raw_type(ctx, "c_char")), + IntKind::SChar => Ok(raw_type(ctx, "c_schar")), + IntKind::UChar => Ok(raw_type(ctx, "c_uchar")), + IntKind::Short => Ok(raw_type(ctx, "c_short")), + IntKind::UShort => Ok(raw_type(ctx, "c_ushort")), + IntKind::Int => Ok(raw_type(ctx, "c_int")), + IntKind::UInt => Ok(raw_type(ctx, "c_uint")), + IntKind::Long => Ok(raw_type(ctx, "c_long")), + IntKind::ULong => Ok(raw_type(ctx, "c_ulong")), + IntKind::LongLong => Ok(raw_type(ctx, "c_longlong")), + IntKind::ULongLong => Ok(raw_type(ctx, "c_ulonglong")), + IntKind::WChar => { + let layout = self + .layout(ctx) + .expect("Couldn't compute wchar_t's layout?"); + let ty = Layout::known_type_for_size(ctx, layout.size) + .expect("Non-representable wchar_t?"); + let ident = ctx.rust_ident_raw(ty); + Ok(quote! { #ident }) + } + + IntKind::I8 => Ok(quote! { i8 }), + IntKind::U8 => Ok(quote! { u8 }), + IntKind::I16 => Ok(quote! { i16 }), + IntKind::U16 => Ok(quote! { u16 }), + IntKind::I32 => Ok(quote! { i32 }), + IntKind::U32 => Ok(quote! { u32 }), + IntKind::I64 => Ok(quote! { i64 }), + IntKind::U64 => Ok(quote! { u64 }), + IntKind::Custom { name, .. } => { + Ok(proc_macro2::TokenStream::from_str(name).unwrap()) + } + IntKind::U128 => { + Ok(if ctx.options().rust_features.i128_and_u128 { + quote! { u128 } + } else { + // Best effort thing, but wrong alignment + // unfortunately. + quote! { [u64; 2] } + }) + } + IntKind::I128 => { + Ok(if ctx.options().rust_features.i128_and_u128 { + quote! { i128 } + } else { + quote! { [u64; 2] } + }) + } + } + } + TypeKind::Float(fk) => { + Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))) + } + TypeKind::Complex(fk) => { + let float_path = + float_kind_rust_type(ctx, fk, self.layout(ctx)); + + ctx.generated_bindgen_complex(); + Ok(if ctx.options().enable_cxx_namespaces { + quote! { + root::__BindgenComplex<#float_path> + } + } else { + quote! 
{ + __BindgenComplex<#float_path> + } + }) + } + TypeKind::Function(ref fs) => { + // We can't rely on the sizeof(Option>) == + // sizeof(NonZero<_>) optimization with opaque blobs (because + // they aren't NonZero), so don't *ever* use an or_opaque + // variant here. + let ty = fs.try_to_rust_ty(ctx, &())?; + + let prefix = ctx.trait_prefix(); + Ok(quote! { + ::#prefix::option::Option<#ty> + }) + } + TypeKind::Array(item, len) | TypeKind::Vector(item, len) => { + let ty = item.try_to_rust_ty(ctx, &())?; + Ok(quote! { + [ #ty ; #len ] + }) + } + TypeKind::Enum(..) => { + let path = item.namespace_aware_canonical_path(ctx); + let path = proc_macro2::TokenStream::from_str(&path.join("::")) + .unwrap(); + Ok(quote!(#path)) + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.try_to_rust_ty(ctx, item) + } + TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()), + TypeKind::TemplateAlias(..) | + TypeKind::Alias(..) | + TypeKind::BlockPointer(..) => { + if self.is_block_pointer() && !ctx.options().generate_block { + let void = c_void(ctx); + return Ok(void.to_ptr(/* is_const = */ false)); + } + + if item.is_opaque(ctx, &()) && + item.used_template_params(ctx) + .into_iter() + .any(|param| param.is_template_param(ctx, &())) + { + self.try_to_opaque(ctx, item) + } else if let Some(ty) = self + .name() + .and_then(|name| utils::type_from_named(ctx, name)) + { + Ok(ty) + } else { + utils::build_path(item, ctx) + } + } + TypeKind::Comp(ref info) => { + let template_params = item.all_template_params(ctx); + if info.has_non_type_template_params() || + (item.is_opaque(ctx, &()) && !template_params.is_empty()) + { + return self.try_to_opaque(ctx, item); + } + + utils::build_path(item, ctx) + } + TypeKind::Opaque => self.try_to_opaque(ctx, item), + TypeKind::Pointer(inner) | TypeKind::Reference(inner) => { + let is_const = ctx.resolve_type(inner).is_const(); + + let inner = + inner.into_resolver().through_type_refs().resolve(ctx); + let inner_ty = 
inner.expect_type(); + + let is_objc_pointer = + matches!(inner_ty.kind(), TypeKind::ObjCInterface(..)); + + // Regardless if we can properly represent the inner type, we + // should always generate a proper pointer here, so use + // infallible conversion of the inner type. + let mut ty = inner.to_rust_ty_or_opaque(ctx, &()); + ty.append_implicit_template_params(ctx, inner); + + // Avoid the first function pointer level, since it's already + // represented in Rust. + if inner_ty.canonical_type(ctx).is_function() || is_objc_pointer + { + Ok(ty) + } else { + Ok(ty.to_ptr(is_const)) + } + } + TypeKind::TypeParam => { + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(name); + Ok(quote! { + #ident + }) + } + TypeKind::ObjCSel => Ok(quote! { + objc::runtime::Sel + }), + TypeKind::ObjCId => Ok(quote! { + id + }), + TypeKind::ObjCInterface(ref interface) => { + let name = ctx.rust_ident(interface.name()); + Ok(quote! { + #name + }) + } + ref u @ TypeKind::UnresolvedTypeRef(..) => { + unreachable!("Should have been resolved after parsing {:?}!", u) + } + } + } +} + +impl TryToOpaque for TemplateInstantiation { + type Extra = Item; + + fn try_get_layout( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + item.expect_type() + .layout(ctx) + .ok_or(error::Error::NoLayoutForOpaqueBlob) + } +} + +impl TryToRustTy for TemplateInstantiation { + type Extra = Item; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + if self.is_opaque(ctx, item) { + return Err(error::Error::InstantiationOfOpaqueType); + } + + let def = self + .template_definition() + .into_resolver() + .through_type_refs() + .resolve(ctx); + + let mut ty = quote! 
{}; + let def_path = def.namespace_aware_canonical_path(ctx); + ty.append_separated( + def_path.into_iter().map(|p| ctx.rust_ident(p)), + quote!(::), + ); + + let def_params = def.self_template_params(ctx); + if def_params.is_empty() { + // This can happen if we generated an opaque type for a partial + // template specialization, and we've hit an instantiation of + // that partial specialization. + extra_assert!(def.is_opaque(ctx, &())); + return Err(error::Error::InstantiationOfOpaqueType); + } + + // TODO: If the definition type is a template class/struct + // definition's member template definition, it could rely on + // generic template parameters from its outer template + // class/struct. When we emit bindings for it, it could require + // *more* type arguments than we have here, and we will need to + // reconstruct them somehow. We don't have any means of doing + // that reconstruction at this time. + + let template_args = self + .template_arguments() + .iter() + .zip(def_params.iter()) + // Only pass type arguments for the type parameters that + // the def uses. + .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param)) + .map(|(arg, _)| { + let arg = arg.into_resolver().through_type_refs().resolve(ctx); + let mut ty = arg.try_to_rust_ty(ctx, &())?; + ty.append_implicit_template_params(ctx, arg); + Ok(ty) + }) + .collect::>>()?; + + if template_args.is_empty() { + return Ok(ty); + } + + Ok(quote! { + #ty < #( #template_args ),* > + }) + } +} + +impl TryToRustTy for FunctionSig { + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + // TODO: we might want to consider ignoring the reference return value. 
+ let ret = utils::fnsig_return_ty(ctx, self); + let arguments = utils::fnsig_arguments(ctx, self); + + match self.abi(ctx, None) { + ClangAbi::Known(Abi::ThisCall) + if !ctx.options().rust_features().thiscall_abi => + { + warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target"); + Ok(proc_macro2::TokenStream::new()) + } + ClangAbi::Known(Abi::Vectorcall) + if !ctx.options().rust_features().vectorcall_abi => + { + warn!("Skipping function with vectorcall ABI that isn't supported by the configured Rust target"); + Ok(proc_macro2::TokenStream::new()) + } + ClangAbi::Known(Abi::CUnwind) + if !ctx.options().rust_features().c_unwind_abi => + { + warn!("Skipping function with C-unwind ABI that isn't supported by the configured Rust target"); + Ok(proc_macro2::TokenStream::new()) + } + ClangAbi::Known(Abi::EfiApi) + if !ctx.options().rust_features().abi_efiapi => + { + warn!("Skipping function with efiapi ABI that isn't supported by the configured Rust target"); + Ok(proc_macro2::TokenStream::new()) + } + abi => Ok(quote! { + unsafe extern #abi fn ( #( #arguments ),* ) #ret + }), + } + } +} + +impl CodeGenerator for Function { + type Extra = Item; + + /// If we've actually generated the symbol, the number of times we've seen + /// it. + type Return = Option; + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) -> Self::Return { + debug!("::codegen: item = {:?}", item); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let is_internal = matches!(self.linkage(), Linkage::Internal); + + if is_internal && !ctx.options().wrap_static_fns { + // We can't do anything with Internal functions if we are not wrapping them so just + // avoid generating anything for them. + return None; + } + + // Pure virtual methods have no actual symbol, so we can't generate + // something meaningful for them. 
+ let is_dynamic_function = match self.kind() { + FunctionKind::Method(ref method_kind) + if method_kind.is_pure_virtual() => + { + return None; + } + FunctionKind::Function => { + ctx.options().dynamic_library_name.is_some() + } + _ => false, + }; + + // Similar to static member variables in a class template, we can't + // generate bindings to template functions, because the set of + // instantiations is open ended and we have no way of knowing which + // monomorphizations actually exist. + if !item.all_template_params(ctx).is_empty() { + return None; + } + + let name = self.name(); + let mut canonical_name = item.canonical_name(ctx); + let mangled_name = self.mangled_name(); + + { + let seen_symbol_name = mangled_name.unwrap_or(&canonical_name); + + // TODO: Maybe warn here if there's a type/argument mismatch, or + // something? + if result.seen_function(seen_symbol_name) { + return None; + } + result.saw_function(seen_symbol_name); + } + + let signature_item = ctx.resolve_item(self.signature()); + let signature = signature_item.kind().expect_type().canonical_type(ctx); + let signature = match *signature.kind() { + TypeKind::Function(ref sig) => sig, + _ => panic!("Signature kind is not a Function: {:?}", signature), + }; + + let args = utils::fnsig_arguments(ctx, signature); + let ret = utils::fnsig_return_ty(ctx, signature); + + let mut attributes = vec![]; + + if ctx.options().rust_features().must_use_function { + let must_use = signature.must_use() || { + let ret_ty = signature + .return_type() + .into_resolver() + .through_type_refs() + .resolve(ctx); + ret_ty.must_use(ctx) + }; + + if must_use { + attributes.push(attributes::must_use()); + } + } + + if let Some(comment) = item.comment(ctx) { + attributes.push(attributes::doc(comment)); + } + + let abi = match signature.abi(ctx, Some(name)) { + ClangAbi::Known(Abi::ThisCall) + if !ctx.options().rust_features().thiscall_abi => + { + unsupported_abi_diagnostic::( + name, + item.location(), + "thiscall", + ctx, 
+ ); + return None; + } + ClangAbi::Known(Abi::Vectorcall) + if !ctx.options().rust_features().vectorcall_abi => + { + unsupported_abi_diagnostic::( + name, + item.location(), + "vectorcall", + ctx, + ); + return None; + } + ClangAbi::Known(Abi::CUnwind) + if !ctx.options().rust_features().c_unwind_abi => + { + unsupported_abi_diagnostic::( + name, + item.location(), + "C-unwind", + ctx, + ); + return None; + } + ClangAbi::Known(Abi::EfiApi) + if !ctx.options().rust_features().abi_efiapi => + { + unsupported_abi_diagnostic::( + name, + item.location(), + "efiapi", + ctx, + ); + return None; + } + ClangAbi::Known(Abi::Win64) if signature.is_variadic() => { + unsupported_abi_diagnostic::( + name, + item.location(), + "Win64", + ctx, + ); + return None; + } + ClangAbi::Unknown(unknown_abi) => { + panic!( + "Invalid or unknown abi {:?} for function {:?} ({:?})", + unknown_abi, canonical_name, self + ); + } + abi => abi, + }; + + if is_internal && ctx.options().wrap_static_fns { + result.items_to_serialize.push(item.id()); + } + + // Handle overloaded functions by giving each overload its own unique + // suffix. + let times_seen = result.overload_number(&canonical_name); + if times_seen > 0 { + write!(&mut canonical_name, "{}", times_seen).unwrap(); + } + + let mut has_link_name_attr = false; + if let Some(link_name) = self.link_name() { + attributes.push(attributes::link_name::(link_name)); + has_link_name_attr = true; + } else { + let link_name = mangled_name.unwrap_or(name); + if !is_dynamic_function && + !utils::names_will_be_identical_after_mangling( + &canonical_name, + link_name, + Some(abi), + ) + { + attributes.push(attributes::link_name::(link_name)); + has_link_name_attr = true; + } + } + + // Unfortunately this can't piggyback on the `attributes` list because + // the #[link(wasm_import_module)] needs to happen before the `extern + // "C"` block. 
It doesn't get picked up properly otherwise + let wasm_link_attribute = + ctx.options().wasm_import_module_name.as_ref().map(|name| { + quote! { #[link(wasm_import_module = #name)] } + }); + + if is_internal && ctx.options().wrap_static_fns && !has_link_name_attr { + let name = canonical_name.clone() + ctx.wrap_static_fns_suffix(); + attributes.push(attributes::link_name::(&name)); + } + + let ident = ctx.rust_ident(canonical_name); + let tokens = quote! { + #wasm_link_attribute + extern #abi { + #(#attributes)* + pub fn #ident ( #( #args ),* ) #ret; + } + }; + + // If we're doing dynamic binding generation, add to the dynamic items. + if is_dynamic_function { + let args_identifiers = + utils::fnsig_argument_identifiers(ctx, signature); + let ret_ty = utils::fnsig_return_ty(ctx, signature); + result.dynamic_items().push( + ident, + abi, + signature.is_variadic(), + ctx.options().dynamic_link_require_all, + args, + args_identifiers, + ret, + ret_ty, + attributes, + ctx, + ); + } else { + result.push(tokens); + } + Some(times_seen) + } +} + +fn unsupported_abi_diagnostic( + fn_name: &str, + _location: Option<&crate::clang::SourceLocation>, + abi: &str, + _ctx: &BindgenContext, +) { + warn!( + "Skipping {}function `{}` with the {} ABI that isn't supported by the configured Rust target", + if VARIADIC { "variadic " } else { "" }, + fn_name, + abi + ); + + #[cfg(feature = "experimental")] + if _ctx.options().emit_diagnostics { + use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; + + let mut diag = Diagnostic::default(); + diag + .with_title(format!( + "The `{}` {}function uses the {} ABI which is not supported by the configured Rust target.", + fn_name, + if VARIADIC { "variadic " } else { "" }, + abi), Level::Warn) + .add_annotation("No code will be generated for this function.", Level::Warn) + .add_annotation(format!("The configured Rust version is {}.", String::from(_ctx.options().rust_target)), Level::Note); + + if let Some(loc) = _location { + let (file, 
line, col, _) = loc.location(); + + if let Some(filename) = file.name() { + if let Ok(Some(source)) = get_line(&filename, line) { + let mut slice = Slice::default(); + slice + .with_source(source) + .with_location(filename, line, col); + diag.add_slice(slice); + } + } + } + + diag.display() + } +} + +fn objc_method_codegen( + ctx: &BindgenContext, + method: &ObjCMethod, + methods: &mut Vec, + class_name: Option<&str>, + rust_class_name: &str, + prefix: &str, +) { + // This would ideally resolve the method into an Item, and use + // Item::process_before_codegen; however, ObjC methods are not currently + // made into function items. + let name = format!("{}::{}{}", rust_class_name, prefix, method.rust_name()); + if ctx.options().blocklisted_items.matches(name) { + return; + } + + let signature = method.signature(); + let fn_args = utils::fnsig_arguments(ctx, signature); + let fn_ret = utils::fnsig_return_ty(ctx, signature); + + let sig = if method.is_class_method() { + quote! { + ( #( #fn_args ),* ) #fn_ret + } + } else { + let self_arr = [quote! { &self }]; + let args = self_arr.iter().chain(fn_args.iter()); + quote! { + ( #( #args ),* ) #fn_ret + } + }; + + let methods_and_args = method.format_method_call(&fn_args); + + let body = { + let body = if method.is_class_method() { + let class_name = ctx.rust_ident( + class_name + .expect("Generating a class method without class name?"), + ); + quote!(msg_send!(class!(#class_name), #methods_and_args)) + } else { + quote!(msg_send!(*self, #methods_and_args)) + }; + + ctx.wrap_unsafe_ops(body) + }; + + let method_name = + ctx.rust_ident(format!("{}{}", prefix, method.rust_name())); + + methods.push(quote! 
{ + unsafe fn #method_name #sig where ::Target: objc::Message + Sized { + #body + } + }); +} + +impl CodeGenerator for ObjCInterface { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let mut impl_items = vec![]; + let rust_class_name = item.path_for_allowlisting(ctx)[1..].join("::"); + + for method in self.methods() { + objc_method_codegen( + ctx, + method, + &mut impl_items, + None, + &rust_class_name, + "", + ); + } + + for class_method in self.class_methods() { + let ambiquity = self + .methods() + .iter() + .map(|m| m.rust_name()) + .any(|x| x == class_method.rust_name()); + let prefix = if ambiquity { "class_" } else { "" }; + objc_method_codegen( + ctx, + class_method, + &mut impl_items, + Some(self.name()), + &rust_class_name, + prefix, + ); + } + + let trait_name = ctx.rust_ident(self.rust_name()); + let trait_constraints = quote! { + Sized + std::ops::Deref + }; + let trait_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + + quote! { + pub trait #trait_name <#(#template_names:'static),*> : #trait_constraints { + #( #impl_items )* + } + } + } else { + quote! { + pub trait #trait_name : #trait_constraints { + #( #impl_items )* + } + } + }; + + let class_name = ctx.rust_ident(self.name()); + if !self.is_category() && !self.is_protocol() { + let struct_block = quote! 
{ + #[repr(transparent)] + #[derive(Debug, Copy, Clone)] + pub struct #class_name(pub id); + impl std::ops::Deref for #class_name { + type Target = objc::runtime::Object; + fn deref(&self) -> &Self::Target { + unsafe { + &*self.0 + } + } + } + unsafe impl objc::Message for #class_name { } + impl #class_name { + pub fn alloc() -> Self { + Self(unsafe { + msg_send!(class!(#class_name), alloc) + }) + } + } + }; + result.push(struct_block); + let mut protocol_set: HashSet = Default::default(); + for protocol_id in self.conforms_to.iter() { + protocol_set.insert(*protocol_id); + let protocol_name = ctx.rust_ident( + ctx.resolve_type(protocol_id.expect_type_id(ctx)) + .name() + .unwrap(), + ); + let impl_trait = quote! { + impl #protocol_name for #class_name { } + }; + result.push(impl_trait); + } + let mut parent_class = self.parent_class; + while let Some(parent_id) = parent_class { + let parent = parent_id + .expect_type_id(ctx) + .into_resolver() + .through_type_refs() + .resolve(ctx) + .expect_type() + .kind(); + + let parent = match parent { + TypeKind::ObjCInterface(ref parent) => parent, + _ => break, + }; + parent_class = parent.parent_class; + + let parent_name = ctx.rust_ident(parent.rust_name()); + let impl_trait = if parent.is_template() { + let template_names: Vec = parent + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { + } + } + } else { + quote! { + impl #parent_name for #class_name { } + } + }; + result.push(impl_trait); + for protocol_id in parent.conforms_to.iter() { + if protocol_set.insert(*protocol_id) { + let protocol_name = ctx.rust_ident( + ctx.resolve_type(protocol_id.expect_type_id(ctx)) + .name() + .unwrap(), + ); + let impl_trait = quote! 
{ + impl #protocol_name for #class_name { } + }; + result.push(impl_trait); + } + } + if !parent.is_template() { + let parent_struct_name = parent.name(); + let child_struct_name = self.name(); + let parent_struct = ctx.rust_ident(parent_struct_name); + let from_block = quote! { + impl From<#class_name> for #parent_struct { + fn from(child: #class_name) -> #parent_struct { + #parent_struct(child.0) + } + } + }; + result.push(from_block); + + let error_msg = format!( + "This {} cannot be downcasted to {}", + parent_struct_name, child_struct_name + ); + let try_into_block = quote! { + impl std::convert::TryFrom<#parent_struct> for #class_name { + type Error = &'static str; + fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> { + let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))}; + if is_kind_of { + Ok(#class_name(parent.0)) + } else { + Err(#error_msg) + } + } + } + }; + result.push(try_into_block); + } + } + } + + if !self.is_protocol() { + let impl_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { + } + } + } else { + quote! 
{ + impl #trait_name for #class_name { + } + } + }; + result.push(impl_block); + } + + result.push(trait_block); + result.saw_objc(); + } +} + +pub(crate) fn codegen( + context: BindgenContext, +) -> Result<(proc_macro2::TokenStream, BindgenOptions), CodegenError> { + context.gen(|context| { + let _t = context.timer("codegen"); + let counter = Cell::new(0); + let mut result = CodegenResult::new(&counter); + + debug!("codegen: {:?}", context.options()); + + if context.options().emit_ir { + let codegen_items = context.codegen_items(); + for (id, item) in context.items() { + if codegen_items.contains(&id) { + println!("ir: {:?} = {:#?}", id, item); + } + } + } + + if let Some(path) = context.options().emit_ir_graphviz.as_ref() { + match dot::write_dot_file(context, path) { + Ok(()) => info!( + "Your dot file was generated successfully into: {}", + path + ), + Err(e) => warn!("{}", e), + } + } + + if let Some(spec) = context.options().depfile.as_ref() { + match spec.write(context.deps()) { + Ok(()) => info!( + "Your depfile was generated successfully into: {}", + spec.depfile_path.display() + ), + Err(e) => warn!("{}", e), + } + } + + context.resolve_item(context.root_module()).codegen( + context, + &mut result, + &(), + ); + + if let Some(ref lib_name) = context.options().dynamic_library_name { + let lib_ident = context.rust_ident(lib_name); + let dynamic_items_tokens = + result.dynamic_items().get_tokens(lib_ident, context); + result.push(dynamic_items_tokens); + } + + utils::serialize_items(&result, context)?; + + Ok(postprocessing::postprocessing( + result.items, + context.options(), + )) + }) +} + +pub(crate) mod utils { + use super::serialize::CSerialize; + use super::{error, CodegenError, CodegenResult, ToRustTyOrOpaque}; + use crate::ir::context::BindgenContext; + use crate::ir::function::{Abi, ClangAbi, FunctionSig}; + use crate::ir::item::{Item, ItemCanonicalPath}; + use crate::ir::ty::TypeKind; + use crate::{args_are_cpp, file_is_cpp}; + use 
std::borrow::Cow; + use std::io::Write; + use std::mem; + use std::path::PathBuf; + use std::str::FromStr; + + pub(super) fn serialize_items( + result: &CodegenResult, + context: &BindgenContext, + ) -> Result<(), CodegenError> { + if result.items_to_serialize.is_empty() { + return Ok(()); + } + + let path = context + .options() + .wrap_static_fns_path + .as_ref() + .map(PathBuf::from) + .unwrap_or_else(|| { + std::env::temp_dir().join("bindgen").join("extern") + }); + + let dir = path.parent().unwrap(); + + if !dir.exists() { + std::fs::create_dir_all(dir)?; + } + + let is_cpp = args_are_cpp(&context.options().clang_args) || + context + .options() + .input_headers + .iter() + .any(|h| file_is_cpp(h)); + + let source_path = path.with_extension(if is_cpp { "cpp" } else { "c" }); + + let mut code = Vec::new(); + + if !context.options().input_headers.is_empty() { + for header in &context.options().input_headers { + writeln!(code, "#include \"{}\"", header)?; + } + + writeln!(code)?; + } + + if !context.options().input_header_contents.is_empty() { + for (name, contents) in &context.options().input_header_contents { + writeln!(code, "// {}\n{}", name, contents)?; + } + + writeln!(code)?; + } + + writeln!(code, "// Static wrappers\n")?; + + for &id in &result.items_to_serialize { + let item = context.resolve_item(id); + item.serialize(context, (), &mut vec![], &mut code)?; + } + + std::fs::write(source_path, code)?; + + Ok(()) + } + + pub(crate) fn prepend_bitfield_unit_type( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let bitfield_unit_src = include_str!("./bitfield_unit.rs"); + let bitfield_unit_src = if ctx.options().rust_features().min_const_fn { + Cow::Borrowed(bitfield_unit_src) + } else { + Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) + }; + let bitfield_unit_type = + proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); + let bitfield_unit_type = quote!(#bitfield_unit_type); + + let items = vec![bitfield_unit_type]; + let old_items 
= mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_objc_header( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let use_objc = if ctx.options().objc_extern_crate { + quote! { + #[macro_use] + extern crate objc; + } + } else { + quote! { + use objc::{self, msg_send, sel, sel_impl, class}; + } + }; + + let id_type = quote! { + #[allow(non_camel_case_types)] + pub type id = *mut objc::runtime::Object; + }; + + let items = vec![use_objc, id_type]; + let old_items = mem::replace(result, items); + result.extend(old_items.into_iter()); + } + + pub(crate) fn prepend_block_header( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let use_block = if ctx.options().block_extern_crate { + quote! { + extern crate block; + } + } else { + quote! { + use block; + } + }; + + let items = vec![use_block]; + let old_items = mem::replace(result, items); + result.extend(old_items.into_iter()); + } + + pub(crate) fn prepend_union_types( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let prefix = ctx.trait_prefix(); + + // If the target supports `const fn`, declare eligible functions + // as `const fn` else just `fn`. + let const_fn = if ctx.options().rust_features().min_const_fn { + quote! { const fn } + } else { + quote! { fn } + }; + + // TODO(emilio): The fmt::Debug impl could be way nicer with + // std::intrinsics::type_name, but... + let union_field_decl = quote! { + #[repr(C)] + pub struct __BindgenUnionField(::#prefix::marker::PhantomData); + }; + + let transmute = + ctx.wrap_unsafe_ops(quote!(::#prefix::mem::transmute(self))); + + let union_field_impl = quote! { + impl __BindgenUnionField { + #[inline] + pub #const_fn new() -> Self { + __BindgenUnionField(::#prefix::marker::PhantomData) + } + + #[inline] + pub unsafe fn as_ref(&self) -> &T { + #transmute + } + + #[inline] + pub unsafe fn as_mut(&mut self) -> &mut T { + #transmute + } + } + }; + + let union_field_default_impl = quote! 
{ + impl ::#prefix::default::Default for __BindgenUnionField { + #[inline] + fn default() -> Self { + Self::new() + } + } + }; + + let union_field_clone_impl = quote! { + impl ::#prefix::clone::Clone for __BindgenUnionField { + #[inline] + fn clone(&self) -> Self { + Self::new() + } + } + }; + + let union_field_copy_impl = quote! { + impl ::#prefix::marker::Copy for __BindgenUnionField {} + }; + + let union_field_debug_impl = quote! { + impl ::#prefix::fmt::Debug for __BindgenUnionField { + fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) + -> ::#prefix::fmt::Result { + fmt.write_str("__BindgenUnionField") + } + } + }; + + // The actual memory of the filed will be hashed, so that's why these + // field doesn't do anything with the hash. + let union_field_hash_impl = quote! { + impl ::#prefix::hash::Hash for __BindgenUnionField { + fn hash(&self, _state: &mut H) { + } + } + }; + + let union_field_partialeq_impl = quote! { + impl ::#prefix::cmp::PartialEq for __BindgenUnionField { + fn eq(&self, _other: &__BindgenUnionField) -> bool { + true + } + } + }; + + let union_field_eq_impl = quote! { + impl ::#prefix::cmp::Eq for __BindgenUnionField { + } + }; + + let items = vec![ + union_field_decl, + union_field_impl, + union_field_default_impl, + union_field_clone_impl, + union_field_copy_impl, + union_field_debug_impl, + union_field_hash_impl, + union_field_partialeq_impl, + union_field_eq_impl, + ]; + + let old_items = mem::replace(result, items); + result.extend(old_items.into_iter()); + } + + pub(crate) fn prepend_incomplete_array_types( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let prefix = ctx.trait_prefix(); + + // If the target supports `const fn`, declare eligible functions + // as `const fn` else just `fn`. + let const_fn = if ctx.options().rust_features().min_const_fn { + quote! { const fn } + } else { + quote! { fn } + }; + + let incomplete_array_decl = quote! 
{ + #[repr(C)] + #[derive(Default)] + pub struct __IncompleteArrayField( + ::#prefix::marker::PhantomData, [T; 0]); + }; + + let from_raw_parts = ctx.wrap_unsafe_ops(quote! ( + ::#prefix::slice::from_raw_parts(self.as_ptr(), len) + )); + let from_raw_parts_mut = ctx.wrap_unsafe_ops(quote! ( + ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + )); + + let incomplete_array_impl = quote! { + impl __IncompleteArrayField { + #[inline] + pub #const_fn new() -> Self { + __IncompleteArrayField(::#prefix::marker::PhantomData, []) + } + + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + #from_raw_parts + } + + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + #from_raw_parts_mut + } + } + }; + + let incomplete_array_debug_impl = quote! { + impl ::#prefix::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) + -> ::#prefix::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } + } + }; + + let items = vec![ + incomplete_array_decl, + incomplete_array_impl, + incomplete_array_debug_impl, + ]; + + let old_items = mem::replace(result, items); + result.extend(old_items.into_iter()); + } + + pub(crate) fn prepend_complex_type( + result: &mut Vec, + ) { + let complex_type = quote! 
{ + #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] + #[repr(C)] + pub struct __BindgenComplex { + pub re: T, + pub im: T + } + }; + + let items = vec![complex_type]; + let old_items = mem::replace(result, items); + result.extend(old_items.into_iter()); + } + + pub(crate) fn build_path( + item: &Item, + ctx: &BindgenContext, + ) -> error::Result { + let path = item.namespace_aware_canonical_path(ctx); + let tokens = + proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); + + Ok(tokens) + } + + fn primitive_ty( + ctx: &BindgenContext, + name: &str, + ) -> proc_macro2::TokenStream { + let ident = ctx.rust_ident_raw(name); + quote! { + #ident + } + } + + pub(crate) fn type_from_named( + ctx: &BindgenContext, + name: &str, + ) -> Option { + // FIXME: We could use the inner item to check this is really a + // primitive type but, who the heck overrides these anyway? + Some(match name { + "int8_t" => primitive_ty(ctx, "i8"), + "uint8_t" => primitive_ty(ctx, "u8"), + "int16_t" => primitive_ty(ctx, "i16"), + "uint16_t" => primitive_ty(ctx, "u16"), + "int32_t" => primitive_ty(ctx, "i32"), + "uint32_t" => primitive_ty(ctx, "u32"), + "int64_t" => primitive_ty(ctx, "i64"), + "uint64_t" => primitive_ty(ctx, "u64"), + + "size_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "usize") + } + "uintptr_t" => primitive_ty(ctx, "usize"), + + "ssize_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "isize") + } + "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), + _ => return None, + }) + } + + fn fnsig_return_ty_internal( + ctx: &BindgenContext, + sig: &FunctionSig, + include_arrow: bool, + ) -> proc_macro2::TokenStream { + if sig.is_divergent() { + return if include_arrow { + quote! { -> ! } + } else { + quote! { ! 
} + }; + } + + let canonical_type_kind = sig + .return_type() + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(ctx) + .kind() + .expect_type() + .kind(); + + if let TypeKind::Void = canonical_type_kind { + return if include_arrow { + quote! {} + } else { + quote! { () } + }; + } + + let ret_ty = sig.return_type().to_rust_ty_or_opaque(ctx, &()); + if include_arrow { + quote! { -> #ret_ty } + } else { + ret_ty + } + } + + pub(crate) fn fnsig_return_ty( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> proc_macro2::TokenStream { + fnsig_return_ty_internal(ctx, sig, /* include_arrow = */ true) + } + + pub(crate) fn fnsig_arguments( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> Vec { + use super::ToPtr; + + let mut unnamed_arguments = 0; + let mut args = sig + .argument_types() + .iter() + .map(|&(ref name, ty)| { + let arg_item = ctx.resolve_item(ty); + let arg_ty = arg_item.kind().expect_type(); + + // From the C90 standard[1]: + // + // A declaration of a parameter as "array of type" shall be + // adjusted to "qualified pointer to type", where the type + // qualifiers (if any) are those specified within the [ and ] of + // the array type derivation. + // + // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html + let arg_ty = match *arg_ty.canonical_type(ctx).kind() { + TypeKind::Array(t, _) => { + let stream = + if ctx.options().array_pointers_in_arguments { + arg_ty.to_rust_ty_or_opaque(ctx, arg_item) + } else { + t.to_rust_ty_or_opaque(ctx, &()) + }; + stream.to_ptr(ctx.resolve_type(t).is_const()) + } + TypeKind::Pointer(inner) => { + let inner = ctx.resolve_item(inner); + let inner_ty = inner.expect_type(); + if let TypeKind::ObjCInterface(ref interface) = + *inner_ty.canonical_type(ctx).kind() + { + let name = ctx.rust_ident(interface.name()); + quote! 
{ + #name + } + } else { + arg_item.to_rust_ty_or_opaque(ctx, &()) + } + } + _ => arg_item.to_rust_ty_or_opaque(ctx, &()), + }; + + let arg_name = match *name { + Some(ref name) => ctx.rust_mangle(name).into_owned(), + None => { + unnamed_arguments += 1; + format!("arg{}", unnamed_arguments) + } + }; + + assert!(!arg_name.is_empty()); + let arg_name = ctx.rust_ident(arg_name); + + quote! { + #arg_name : #arg_ty + } + }) + .collect::>(); + + if sig.is_variadic() { + args.push(quote! { ... }) + } + + args + } + + pub(crate) fn fnsig_argument_identifiers( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> Vec { + let mut unnamed_arguments = 0; + let args = sig + .argument_types() + .iter() + .map(|&(ref name, _ty)| { + let arg_name = match *name { + Some(ref name) => ctx.rust_mangle(name).into_owned(), + None => { + unnamed_arguments += 1; + format!("arg{}", unnamed_arguments) + } + }; + + assert!(!arg_name.is_empty()); + let arg_name = ctx.rust_ident(arg_name); + + quote! { + #arg_name + } + }) + .collect::>(); + + args + } + + pub(crate) fn fnsig_block( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> proc_macro2::TokenStream { + let args = sig.argument_types().iter().map(|&(_, ty)| { + let arg_item = ctx.resolve_item(ty); + + arg_item.to_rust_ty_or_opaque(ctx, &()) + }); + + let ret_ty = fnsig_return_ty_internal( + ctx, sig, /* include_arrow = */ false, + ); + quote! { + *const ::block::Block<(#(#args,)*), #ret_ty> + } + } + + // Returns true if `canonical_name` will end up as `mangled_name` at the + // machine code level, i.e. after LLVM has applied any target specific + // mangling. + pub(crate) fn names_will_be_identical_after_mangling( + canonical_name: &str, + mangled_name: &str, + call_conv: Option, + ) -> bool { + // If the mangled name and the canonical name are the same then no + // mangling can have happened between the two versions. 
+ if canonical_name == mangled_name { + return true; + } + + // Working with &[u8] makes indexing simpler than with &str + let canonical_name = canonical_name.as_bytes(); + let mangled_name = mangled_name.as_bytes(); + + let (mangling_prefix, expect_suffix) = match call_conv { + Some(ClangAbi::Known(Abi::C)) | + // None is the case for global variables + None => { + (b'_', false) + } + Some(ClangAbi::Known(Abi::Stdcall)) => (b'_', true), + Some(ClangAbi::Known(Abi::Fastcall)) => (b'@', true), + + // This is something we don't recognize, stay on the safe side + // by emitting the `#[link_name]` attribute + Some(_) => return false, + }; + + // Check that the mangled name is long enough to at least contain the + // canonical name plus the expected prefix. + if mangled_name.len() < canonical_name.len() + 1 { + return false; + } + + // Return if the mangled name does not start with the prefix expected + // for the given calling convention. + if mangled_name[0] != mangling_prefix { + return false; + } + + // Check that the mangled name contains the canonical name after the + // prefix + if &mangled_name[1..canonical_name.len() + 1] != canonical_name { + return false; + } + + // If the given calling convention also prescribes a suffix, check that + // it exists too + if expect_suffix { + let suffix = &mangled_name[canonical_name.len() + 1..]; + + // The shortest suffix is "@0" + if suffix.len() < 2 { + return false; + } + + // Check that the suffix starts with '@' and is all ASCII decimals + // after that. 
+ if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) + { + return false; + } + } else if mangled_name.len() != canonical_name.len() + 1 { + // If we don't expect a prefix but there is one, we need the + // #[link_name] attribute + return false; + } + + true + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,72 @@ +use syn::{ + visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, + File, Item, ItemForeignMod, ItemMod, +}; + +pub(super) fn merge_extern_blocks(file: &mut File) { + Visitor.visit_file_mut(file) +} + +struct Visitor; + +impl VisitMut for Visitor { + fn visit_file_mut(&mut self, file: &mut File) { + visit_items(&mut file.items); + visit_file_mut(self, file) + } + + fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { + if let Some((_, ref mut items)) = item_mod.content { + visit_items(items); + } + visit_item_mod_mut(self, item_mod) + } +} + +fn visit_items(items: &mut Vec) { + // Keep all the extern blocks in a different `Vec` for faster search. + let mut extern_blocks = Vec::::new(); + + for item in std::mem::take(items) { + if let Item::ForeignMod(ItemForeignMod { + attrs, + abi, + brace_token, + unsafety, + items: extern_block_items, + }) = item + { + let mut exists = false; + for extern_block in &mut extern_blocks { + // Check if there is a extern block with the same ABI and + // attributes. + if extern_block.attrs == attrs && extern_block.abi == abi { + // Merge the items of the two blocks. 
+ extern_block.items.extend_from_slice(&extern_block_items); + exists = true; + break; + } + } + // If no existing extern block had the same ABI and attributes, store + // it. + if !exists { + extern_blocks.push(ItemForeignMod { + attrs, + abi, + brace_token, + unsafety, + items: extern_block_items, + }); + } + } else { + // If the item is not an extern block, we don't have to do anything and just + // push it back. + items.push(item); + } + } + + // Move all the extern blocks alongside the rest of the items. + for extern_block in extern_blocks { + items.push(Item::ForeignMod(extern_block)); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/mod.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,57 @@ +use proc_macro2::TokenStream; +use quote::ToTokens; +use syn::{parse2, File}; + +use crate::BindgenOptions; + +mod merge_extern_blocks; +mod sort_semantically; + +use merge_extern_blocks::merge_extern_blocks; +use sort_semantically::sort_semantically; + +struct PostProcessingPass { + should_run: fn(&BindgenOptions) -> bool, + run: fn(&mut File), +} + +// TODO: This can be a const fn when mutable references are allowed in const +// context. +macro_rules! 
pass { + ($pass:ident) => { + PostProcessingPass { + should_run: |options| options.$pass, + run: |file| $pass(file), + } + }; +} + +const PASSES: &[PostProcessingPass] = + &[pass!(merge_extern_blocks), pass!(sort_semantically)]; + +pub(crate) fn postprocessing( + items: Vec, + options: &BindgenOptions, +) -> TokenStream { + let items = items.into_iter().collect(); + let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options)); + + if !require_syn { + return items; + } + + // This syn business is a hack, for now. This means that we are re-parsing already + // generated code using `syn` (as opposed to `quote`) because `syn` provides us more + // control over the elements. + // The `unwrap` here is deliberate because bindgen should generate valid rust items at all + // times. + let mut file = parse2::(items).unwrap(); + + for pass in PASSES { + if (pass.should_run)(options) { + (pass.run)(&mut file); + } + } + + file.into_token_stream() +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/sort_semantically.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/sort_semantically.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/sort_semantically.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/postprocessing/sort_semantically.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,46 @@ +use syn::{ + visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, + File, Item, ItemMod, +}; + +pub(super) fn sort_semantically(file: &mut File) { + Visitor.visit_file_mut(file) +} + +struct Visitor; + +impl VisitMut for Visitor { + fn visit_file_mut(&mut self, file: &mut File) { + visit_items(&mut file.items); + visit_file_mut(self, file) + } + + fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { + if let Some((_, ref mut items)) = item_mod.content { + visit_items(items); + } + visit_item_mod_mut(self, item_mod) + 
} +} + +fn visit_items(items: &mut [Item]) { + items.sort_by_key(|item| match item { + Item::Type(_) => 0, + Item::Struct(_) => 1, + Item::Const(_) => 2, + Item::Fn(_) => 3, + Item::Enum(_) => 4, + Item::Union(_) => 5, + Item::Static(_) => 6, + Item::Trait(_) => 7, + Item::TraitAlias(_) => 8, + Item::Impl(_) => 9, + Item::Mod(_) => 10, + Item::Use(_) => 11, + Item::Verbatim(_) => 12, + Item::ExternCrate(_) => 13, + Item::ForeignMod(_) => 14, + Item::Macro(_) => 15, + _ => 18, + }); +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/serialize.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/serialize.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/serialize.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/serialize.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,358 @@ +use std::io::Write; + +use crate::callbacks::IntKind; + +use crate::ir::comp::CompKind; +use crate::ir::context::{BindgenContext, TypeId}; +use crate::ir::function::{Function, FunctionKind}; +use crate::ir::item::Item; +use crate::ir::item::ItemCanonicalName; +use crate::ir::item_kind::ItemKind; +use crate::ir::ty::{FloatKind, Type, TypeKind}; + +use super::CodegenError; + +fn get_loc(item: &Item) -> String { + item.location() + .map(|x| x.to_string()) + .unwrap_or_else(|| "unknown".to_owned()) +} + +pub(crate) trait CSerialize<'a> { + type Extra; + + fn serialize( + &self, + ctx: &BindgenContext, + extra: Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError>; +} + +impl<'a> CSerialize<'a> for Item { + type Extra = (); + + fn serialize( + &self, + ctx: &BindgenContext, + (): Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + match self.kind() { + ItemKind::Function(func) => { + func.serialize(ctx, self, stack, writer) + } + kind => Err(CodegenError::Serialize { + msg: format!("Cannot serialize item kind {:?}", kind), 
+ loc: get_loc(self), + }), + } + } +} + +impl<'a> CSerialize<'a> for Function { + type Extra = &'a Item; + + fn serialize( + &self, + ctx: &BindgenContext, + item: Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + if self.kind() != FunctionKind::Function { + return Err(CodegenError::Serialize { + msg: format!( + "Cannot serialize function kind {:?}", + self.kind(), + ), + loc: get_loc(item), + }); + } + + let signature = match ctx.resolve_type(self.signature()).kind() { + TypeKind::Function(signature) => signature, + _ => unreachable!(), + }; + + let name = self.name(); + + // Function argoments stored as `(name, type_id)` tuples. + let args = { + let mut count = 0; + + signature + .argument_types() + .iter() + .cloned() + .map(|(opt_name, type_id)| { + ( + opt_name.unwrap_or_else(|| { + let name = format!("arg_{}", count); + count += 1; + name + }), + type_id, + ) + }) + .collect::>() + }; + + // The name used for the wrapper self. + let wrap_name = format!("{}{}", name, ctx.wrap_static_fns_suffix()); + // The function's return type + let ret_ty = signature.return_type(); + + // Write `ret_ty wrap_name(args) { return name(arg_names)' }` + ret_ty.serialize(ctx, (), stack, writer)?; + write!(writer, " {}(", wrap_name)?; + serialize_args(&args, ctx, writer)?; + write!(writer, ") {{ return {}(", name)?; + serialize_sep(", ", args.iter(), ctx, writer, |(name, _), _, buf| { + write!(buf, "{}", name).map_err(From::from) + })?; + writeln!(writer, "); }}")?; + + Ok(()) + } +} + +impl<'a> CSerialize<'a> for TypeId { + type Extra = (); + + fn serialize( + &self, + ctx: &BindgenContext, + (): Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + let item = ctx.resolve_item(*self); + item.expect_type().serialize(ctx, item, stack, writer) + } +} + +impl<'a> CSerialize<'a> for Type { + type Extra = &'a Item; + + fn serialize( + &self, + ctx: &BindgenContext, + item: Self::Extra, + stack: &mut Vec, + writer: 
&mut W, + ) -> Result<(), CodegenError> { + match self.kind() { + TypeKind::Void => { + if self.is_const() { + write!(writer, "const ")?; + } + write!(writer, "void")? + } + TypeKind::NullPtr => { + if self.is_const() { + write!(writer, "const ")?; + } + write!(writer, "nullptr_t")? + } + TypeKind::Int(int_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match int_kind { + IntKind::Bool => write!(writer, "bool")?, + IntKind::SChar => write!(writer, "signed char")?, + IntKind::UChar => write!(writer, "unsigned char")?, + IntKind::WChar => write!(writer, "wchar_t")?, + IntKind::Short => write!(writer, "short")?, + IntKind::UShort => write!(writer, "unsigned short")?, + IntKind::Int => write!(writer, "int")?, + IntKind::UInt => write!(writer, "unsigned int")?, + IntKind::Long => write!(writer, "long")?, + IntKind::ULong => write!(writer, "unsigned long")?, + IntKind::LongLong => write!(writer, "long long")?, + IntKind::ULongLong => write!(writer, "unsigned long long")?, + IntKind::Char { .. } => write!(writer, "char")?, + int_kind => { + return Err(CodegenError::Serialize { + msg: format!( + "Cannot serialize integer kind {:?}", + int_kind + ), + loc: get_loc(item), + }) + } + } + } + TypeKind::Float(float_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match float_kind { + FloatKind::Float => write!(writer, "float")?, + FloatKind::Double => write!(writer, "double")?, + FloatKind::LongDouble => write!(writer, "long double")?, + FloatKind::Float128 => write!(writer, "__float128")?, + } + } + TypeKind::Complex(float_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match float_kind { + FloatKind::Float => write!(writer, "float complex")?, + FloatKind::Double => write!(writer, "double complex")?, + FloatKind::LongDouble => { + write!(writer, "long double complex")? 
+ } + FloatKind::Float128 => write!(writer, "__complex128")?, + } + } + TypeKind::Alias(type_id) => { + if let Some(name) = self.name() { + if self.is_const() { + write!(writer, "const {}", name)?; + } else { + write!(writer, "{}", name)?; + } + } else { + type_id.serialize(ctx, (), stack, writer)?; + } + } + TypeKind::Array(type_id, length) => { + type_id.serialize(ctx, (), stack, writer)?; + write!(writer, " [{}]", length)? + } + TypeKind::Function(signature) => { + if self.is_const() { + stack.push("const ".to_string()); + } + + signature.return_type().serialize( + ctx, + (), + &mut vec![], + writer, + )?; + + write!(writer, " (")?; + while let Some(item) = stack.pop() { + write!(writer, "{}", item)?; + } + write!(writer, ")")?; + + write!(writer, " (")?; + serialize_sep( + ", ", + signature.argument_types().iter(), + ctx, + writer, + |(name, type_id), ctx, buf| { + let mut stack = vec![]; + if let Some(name) = name { + stack.push(name.clone()); + } + type_id.serialize(ctx, (), &mut stack, buf) + }, + )?; + write!(writer, ")")? + } + TypeKind::ResolvedTypeRef(type_id) => { + if self.is_const() { + write!(writer, "const ")?; + } + type_id.serialize(ctx, (), stack, writer)? + } + TypeKind::Pointer(type_id) => { + if self.is_const() { + stack.push("*const ".to_owned()); + } else { + stack.push("*".to_owned()); + } + type_id.serialize(ctx, (), stack, writer)? 
+ } + TypeKind::Comp(comp_info) => { + if self.is_const() { + write!(writer, "const ")?; + } + + let name = item.canonical_name(ctx); + + match comp_info.kind() { + CompKind::Struct => write!(writer, "struct {}", name)?, + CompKind::Union => write!(writer, "union {}", name)?, + }; + } + TypeKind::Enum(_enum_ty) => { + if self.is_const() { + write!(writer, "const ")?; + } + + let name = item.canonical_name(ctx); + write!(writer, "enum {}", name)?; + } + ty => { + return Err(CodegenError::Serialize { + msg: format!("Cannot serialize type kind {:?}", ty), + loc: get_loc(item), + }) + } + }; + + if !stack.is_empty() { + write!(writer, " ")?; + while let Some(item) = stack.pop() { + write!(writer, "{}", item)?; + } + } + + Ok(()) + } +} + +fn serialize_args( + args: &[(String, TypeId)], + ctx: &BindgenContext, + writer: &mut W, +) -> Result<(), CodegenError> { + if args.is_empty() { + write!(writer, "void")?; + } else { + serialize_sep( + ", ", + args.iter(), + ctx, + writer, + |(name, type_id), ctx, buf| { + type_id.serialize(ctx, (), &mut vec![name.clone()], buf) + }, + )?; + } + + Ok(()) +} + +fn serialize_sep< + W: Write, + F: FnMut(I::Item, &BindgenContext, &mut W) -> Result<(), CodegenError>, + I: Iterator, +>( + sep: &str, + mut iter: I, + ctx: &BindgenContext, + buf: &mut W, + mut f: F, +) -> Result<(), CodegenError> { + if let Some(item) = iter.next() { + f(item, ctx, buf)?; + let sep = sep.as_bytes(); + for item in iter { + buf.write_all(sep)?; + f(item, ctx, buf)?; + } + } + + Ok(()) +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/struct_layout.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/struct_layout.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/struct_layout.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/codegen/struct_layout.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,444 @@ +//! 
Helpers for code generation that need struct layout + +use super::helpers; + +use crate::ir::comp::CompInfo; +use crate::ir::context::BindgenContext; +use crate::ir::layout::Layout; +use crate::ir::ty::{Type, TypeKind}; +use proc_macro2::{self, Ident, Span}; +use std::cmp; + +const MAX_GUARANTEED_ALIGN: usize = 8; + +/// Trace the layout of struct. +#[derive(Debug)] +pub(crate) struct StructLayoutTracker<'a> { + name: &'a str, + ctx: &'a BindgenContext, + comp: &'a CompInfo, + is_packed: bool, + known_type_layout: Option, + is_rust_union: bool, + can_copy_union_fields: bool, + latest_offset: usize, + padding_count: usize, + latest_field_layout: Option, + max_field_align: usize, + last_field_was_bitfield: bool, +} + +/// Returns a size aligned to a given value. +pub(crate) fn align_to(size: usize, align: usize) -> usize { + if align == 0 { + return size; + } + + let rem = size % align; + if rem == 0 { + return size; + } + + size + align - rem +} + +/// Returns the lower power of two byte count that can hold at most n bits. 
+pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize { + if n == 0 { + return 0; + } + + if n <= 8 { + return 1; + } + + if !n.is_power_of_two() { + n = n.next_power_of_two(); + } + + n / 8 +} + +#[test] +fn test_align_to() { + assert_eq!(align_to(1, 1), 1); + assert_eq!(align_to(1, 2), 2); + assert_eq!(align_to(1, 4), 4); + assert_eq!(align_to(5, 1), 5); + assert_eq!(align_to(17, 4), 20); +} + +#[test] +fn test_bytes_from_bits_pow2() { + assert_eq!(bytes_from_bits_pow2(0), 0); + for i in 1..9 { + assert_eq!(bytes_from_bits_pow2(i), 1); + } + for i in 9..17 { + assert_eq!(bytes_from_bits_pow2(i), 2); + } + for i in 17..33 { + assert_eq!(bytes_from_bits_pow2(i), 4); + } +} + +impl<'a> StructLayoutTracker<'a> { + pub(crate) fn new( + ctx: &'a BindgenContext, + comp: &'a CompInfo, + ty: &'a Type, + name: &'a str, + ) -> Self { + let known_type_layout = ty.layout(ctx); + let is_packed = comp.is_packed(ctx, known_type_layout.as_ref()); + let (is_rust_union, can_copy_union_fields) = + comp.is_rust_union(ctx, known_type_layout.as_ref(), name); + StructLayoutTracker { + name, + ctx, + comp, + is_packed, + known_type_layout, + is_rust_union, + can_copy_union_fields, + latest_offset: 0, + padding_count: 0, + latest_field_layout: None, + max_field_align: 0, + last_field_was_bitfield: false, + } + } + + pub(crate) fn can_copy_union_fields(&self) -> bool { + self.can_copy_union_fields + } + + pub(crate) fn is_rust_union(&self) -> bool { + self.is_rust_union + } + + pub(crate) fn saw_vtable(&mut self) { + debug!("saw vtable for {}", self.name); + + let ptr_size = self.ctx.target_pointer_size(); + self.latest_offset += ptr_size; + self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size)); + self.max_field_align = ptr_size; + } + + pub(crate) fn saw_base(&mut self, base_ty: &Type) { + debug!("saw base for {}", self.name); + if let Some(layout) = base_ty.layout(self.ctx) { + self.align_to_latest_field(layout); + + self.latest_offset += self.padding_bytes(layout) + 
layout.size; + self.latest_field_layout = Some(layout); + self.max_field_align = cmp::max(self.max_field_align, layout.align); + } + } + + pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) { + debug!("saw bitfield unit for {}: {:?}", self.name, layout); + + self.align_to_latest_field(layout); + + self.latest_offset += layout.size; + + debug!( + "Offset: : {} -> {}", + self.latest_offset - layout.size, + self.latest_offset + ); + + self.latest_field_layout = Some(layout); + self.last_field_was_bitfield = true; + // NB: We intentionally don't update the max_field_align here, since our + // bitfields code doesn't necessarily guarantee it, so we need to + // actually generate the dummy alignment. + } + + /// Returns a padding field if necessary for a given new field _before_ + /// adding that field. + pub(crate) fn saw_field( + &mut self, + field_name: &str, + field_ty: &Type, + field_offset: Option, + ) -> Option { + let mut field_layout = field_ty.layout(self.ctx)?; + + if let TypeKind::Array(inner, len) = + *field_ty.canonical_type(self.ctx).kind() + { + // FIXME(emilio): As an _ultra_ hack, we correct the layout returned + // by arrays of structs that have a bigger alignment than what we + // can support. + // + // This means that the structs in the array are super-unsafe to + // access, since they won't be properly aligned, but there's not too + // much we can do about it. 
+ if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) + { + if layout.align > MAX_GUARANTEED_ALIGN { + field_layout.size = + align_to(layout.size, layout.align) * len; + field_layout.align = MAX_GUARANTEED_ALIGN; + } + } + } + self.saw_field_with_layout(field_name, field_layout, field_offset) + } + + pub(crate) fn saw_field_with_layout( + &mut self, + field_name: &str, + field_layout: Layout, + field_offset: Option, + ) -> Option { + let will_merge_with_bitfield = self.align_to_latest_field(field_layout); + + let is_union = self.comp.is_union(); + let padding_bytes = match field_offset { + Some(offset) if offset / 8 > self.latest_offset => { + offset / 8 - self.latest_offset + } + _ => { + if will_merge_with_bitfield || + field_layout.align == 0 || + is_union + { + 0 + } else if !self.is_packed { + self.padding_bytes(field_layout) + } else if let Some(l) = self.known_type_layout { + self.padding_bytes(l) + } else { + 0 + } + } + }; + + self.latest_offset += padding_bytes; + + let padding_layout = if self.is_packed || is_union { + None + } else { + let force_padding = self.ctx.options().force_explicit_padding; + + // Otherwise the padding is useless. 
+ let need_padding = force_padding || + padding_bytes >= field_layout.align || + field_layout.align > MAX_GUARANTEED_ALIGN; + + debug!( + "Offset: : {} -> {}", + self.latest_offset - padding_bytes, + self.latest_offset + ); + + debug!( + "align field {} to {}/{} with {} padding bytes {:?}", + field_name, + self.latest_offset, + field_offset.unwrap_or(0) / 8, + padding_bytes, + field_layout + ); + + let padding_align = if force_padding { + 1 + } else { + cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN) + }; + + if need_padding && padding_bytes != 0 { + Some(Layout::new(padding_bytes, padding_align)) + } else { + None + } + }; + + self.latest_offset += field_layout.size; + self.latest_field_layout = Some(field_layout); + self.max_field_align = + cmp::max(self.max_field_align, field_layout.align); + self.last_field_was_bitfield = false; + + debug!( + "Offset: {}: {} -> {}", + field_name, + self.latest_offset - field_layout.size, + self.latest_offset + ); + + padding_layout.map(|layout| self.padding_field(layout)) + } + + pub(crate) fn add_tail_padding( + &mut self, + comp_name: &str, + comp_layout: Layout, + ) -> Option { + // Only emit an padding field at the end of a struct if the + // user configures explicit padding. + if !self.ctx.options().force_explicit_padding { + return None; + } + + // Padding doesn't make sense for rust unions. + if self.is_rust_union { + return None; + } + + if self.latest_offset == comp_layout.size { + // This struct does not contain tail padding. 
+ return None; + } + + trace!( + "need a tail padding field for {}: offset {} -> size {}", + comp_name, + self.latest_offset, + comp_layout.size + ); + let size = comp_layout.size - self.latest_offset; + Some(self.padding_field(Layout::new(size, 0))) + } + + pub(crate) fn pad_struct( + &mut self, + layout: Layout, + ) -> Option { + debug!( + "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}", + self, layout + ); + + if layout.size < self.latest_offset { + warn!( + "Calculated wrong layout for {}, too more {} bytes", + self.name, + self.latest_offset - layout.size + ); + return None; + } + + let padding_bytes = layout.size - self.latest_offset; + if padding_bytes == 0 { + return None; + } + + let repr_align = self.ctx.options().rust_features().repr_align; + + // We always pad to get to the correct size if the struct is one of + // those we can't align properly. + // + // Note that if the last field we saw was a bitfield, we may need to pad + // regardless, because bitfields don't respect alignment as strictly as + // other fields. + if padding_bytes >= layout.align || + (self.last_field_was_bitfield && + padding_bytes >= self.latest_field_layout.unwrap().align) || + (!repr_align && layout.align > MAX_GUARANTEED_ALIGN) + { + let layout = if self.is_packed { + Layout::new(padding_bytes, 1) + } else if self.last_field_was_bitfield || + layout.align > MAX_GUARANTEED_ALIGN + { + // We've already given up on alignment here. + Layout::for_size(self.ctx, padding_bytes) + } else { + Layout::new(padding_bytes, layout.align) + }; + + debug!("pad bytes to struct {}, {:?}", self.name, layout); + + Some(self.padding_field(layout)) + } else { + None + } + } + + pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool { + let repr_align = self.ctx.options().rust_features().repr_align; + + // Always force explicit repr(align) for stuff more than 16-byte aligned + // to work-around https://github.com/rust-lang/rust/issues/54341. 
+ // + // Worst-case this just generates redundant alignment attributes. + if repr_align && self.max_field_align >= 16 { + return true; + } + + if self.max_field_align >= layout.align { + return false; + } + + // We can only generate up-to a 8-bytes of alignment unless we support + // repr(align). + repr_align || layout.align <= MAX_GUARANTEED_ALIGN + } + + fn padding_bytes(&self, layout: Layout) -> usize { + align_to(self.latest_offset, layout.align) - self.latest_offset + } + + fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream { + let ty = helpers::blob(self.ctx, layout); + let padding_count = self.padding_count; + + self.padding_count += 1; + + let padding_field_name = Ident::new( + &format!("__bindgen_padding_{}", padding_count), + Span::call_site(), + ); + + self.max_field_align = cmp::max(self.max_field_align, layout.align); + + quote! { + pub #padding_field_name : #ty , + } + } + + /// Returns whether the new field is known to merge with a bitfield. + /// + /// This is just to avoid doing the same check also in pad_field. + fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool { + if self.is_packed { + // Skip to align fields when packed. + return false; + } + + let layout = match self.latest_field_layout { + Some(l) => l, + None => return false, + }; + + // If it was, we may or may not need to align, depending on what the + // current field alignment and the bitfield size and alignment are. + debug!( + "align_to_bitfield? {}: {:?} {:?}", + self.last_field_was_bitfield, layout, new_field_layout + ); + + // Avoid divide-by-zero errors if align is 0. + let align = cmp::max(1, layout.align); + + if self.last_field_was_bitfield && + new_field_layout.align <= layout.size % align && + new_field_layout.size <= layout.size % align + { + // The new field will be coalesced into some of the remaining bits. + // + // FIXME(emilio): I think this may not catch everything? 
+ debug!("Will merge with bitfield"); + return true; + } + + // Else, just align the obvious way. + self.latest_offset += self.padding_bytes(layout); + false + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/csmith-fuzzing/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/csmith-fuzzing/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/csmith-fuzzing/README.md 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/csmith-fuzzing/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -# Fuzzing `bindgen` with `csmith` - -[`csmith`][csmith] generates random C and C++ programs that can be used as test -cases for compilers. When testing `bindgen` with `csmith`, we interpret the -generated programs as header files, and emit Rust bindings to them. If `bindgen` -panics, the emitted bindings won't compile with `rustc`, or the generated layout -tests in the bindings fail, then we report an issue containing the test case! - - - - - -- [Prerequisites](#prerequisites) -- [Running the Fuzzer](#running-the-fuzzer) -- [Reporting Issues](#reporting-issues) - - - -## Prerequisites - -Requires `python3`, `csmith`, and `creduce` to be in `$PATH`. - -Many OS package managers have `csmith` and `creduce` packages: - -``` -$ sudo apt install csmith creduce -$ brew install csmith creduce -$ # Etc... -``` - -## Running the Fuzzer - -Run `csmith` and test `bindgen` on the generated test cases with this command: - -``` -$ ./driver.py -``` - -The driver will keep running until it encounters an error in `bindgen`. - -Each invocation of `./driver.py` will use its own temporary directories, so -running it in multiple terminals in parallel is supported. 
- -`csmith` is run with `--no-checksum --nomain --max-block-size 1 ---max-block-depth 1` which disables the `main` function, and makes function -bodies as simple as possible as `bindgen` does not care about them, but they -cannot be completely disabled in `csmith`. Run `csmith --help` to see what -exactly those options do. - -## Reporting Issues - -Once the fuzz driver finds a test case that causes some kind of error in -`bindgen` or its emitted bindings, it is helpful to -[run C-Reduce on the test case][creducing] to remove the parts that are -irrelevant to reproducing the error. This is ***very*** helpful for the folks -who further investigate the issue and come up with a fix! - -Additionally, mention that you discovered the issue via `csmith` and we will add -the `A-csmith` label. You can find all the issues discovered with `csmith`, and -related to fuzzing with `csmith`, by looking up -[all issues tagged with the `A-csmith` label][csmith-issues]. - -[csmith]: https://github.com/csmith-project/csmith -[creducing]: ../CONTRIBUTING.md#using-creduce-to-minimize-test-cases -[csmith-issues]: https://github.com/rust-lang/rust-bindgen/issues?q=label%3AA-csmith diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/deps.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/deps.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/deps.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/deps.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,20 @@ +/// Generating build depfiles from parsed bindings. 
+use std::{collections::BTreeSet, path::PathBuf}; + +#[derive(Clone, Debug)] +pub(crate) struct DepfileSpec { + pub output_module: String, + pub depfile_path: PathBuf, +} + +impl DepfileSpec { + pub fn write(&self, deps: &BTreeSet) -> std::io::Result<()> { + let mut buf = format!("{}:", self.output_module); + + for file in deps { + buf = format!("{} {}", buf, file); + } + + std::fs::write(&self.depfile_path, &buf) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/diagnostics.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/diagnostics.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/diagnostics.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/diagnostics.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,189 @@ +//! Types and function used to emit pretty diagnostics for `bindgen`. +//! +//! The entry point of this module is the [`Diagnostic`] type. + +use std::fmt::Write; +use std::io::{self, BufRead, BufReader}; +use std::{borrow::Cow, fs::File}; + +use annotate_snippets::{ + display_list::{DisplayList, FormatOptions}, + snippet::{Annotation, Slice as ExtSlice, Snippet}, +}; + +use annotate_snippets::snippet::AnnotationType; + +#[derive(Clone, Copy, Debug)] +pub(crate) enum Level { + Error, + Warn, + Info, + Note, + Help, +} + +impl From for AnnotationType { + fn from(level: Level) -> Self { + match level { + Level::Error => Self::Error, + Level::Warn => Self::Warning, + Level::Info => Self::Info, + Level::Note => Self::Note, + Level::Help => Self::Help, + } + } +} + +/// A `bindgen` diagnostic. +#[derive(Default)] +pub(crate) struct Diagnostic<'a> { + title: Option<(Cow<'a, str>, Level)>, + slices: Vec>, + footer: Vec<(Cow<'a, str>, Level)>, +} + +impl<'a> Diagnostic<'a> { + /// Add a title to the diagnostic and set its type. 
+ pub(crate) fn with_title( + &mut self, + title: impl Into>, + level: Level, + ) -> &mut Self { + self.title = Some((title.into(), level)); + self + } + + /// Add a slice of source code to the diagnostic. + pub(crate) fn add_slice(&mut self, slice: Slice<'a>) -> &mut Self { + self.slices.push(slice); + self + } + + /// Add a footer annotation to the diagnostic. This annotation will have its own type. + pub(crate) fn add_annotation( + &mut self, + msg: impl Into>, + level: Level, + ) -> &mut Self { + self.footer.push((msg.into(), level)); + self + } + + /// Print this diagnostic. + /// + /// The diagnostic is printed using `cargo:warning` if `bindgen` is being invoked by a build + /// script or using `eprintln` otherwise. + pub(crate) fn display(&self) { + std::thread_local! { + static INVOKED_BY_BUILD_SCRIPT: bool = std::env::var_os("CARGO_CFG_TARGET_ARCH").is_some(); + } + + let mut title = None; + let mut footer = vec![]; + let mut slices = vec![]; + if let Some((msg, level)) = &self.title { + title = Some(Annotation { + id: Some("bindgen"), + label: Some(msg.as_ref()), + annotation_type: (*level).into(), + }) + } + + for (msg, level) in &self.footer { + footer.push(Annotation { + id: None, + label: Some(msg.as_ref()), + annotation_type: (*level).into(), + }); + } + + // add additional info that this is generated by bindgen + // so as to not confuse with rustc warnings + footer.push(Annotation { + id: None, + label: Some("This diagnostic was generated by bindgen."), + annotation_type: AnnotationType::Info, + }); + + for slice in &self.slices { + if let Some(source) = &slice.source { + slices.push(ExtSlice { + source: source.as_ref(), + line_start: slice.line.unwrap_or_default(), + origin: slice.filename.as_deref(), + annotations: vec![], + fold: false, + }) + } + } + + let snippet = Snippet { + title, + footer, + slices, + opt: FormatOptions { + color: true, + ..Default::default() + }, + }; + let dl = DisplayList::from(snippet); + + if 
INVOKED_BY_BUILD_SCRIPT.with(Clone::clone) { + // This is just a hack which hides the `warning:` added by cargo at the beginning of + // every line. This should be fine as our diagnostics already have a colorful title. + // FIXME (pvdrz): Could it be that this doesn't work in other languages? + let hide_warning = "\r \r"; + let string = dl.to_string(); + for line in string.lines() { + println!("cargo:warning={}{}", hide_warning, line); + } + } else { + eprintln!("{}\n", dl); + } + } +} + +/// A slice of source code. +#[derive(Default)] +pub(crate) struct Slice<'a> { + source: Option>, + filename: Option, + line: Option, +} + +impl<'a> Slice<'a> { + /// Set the source code. + pub(crate) fn with_source( + &mut self, + source: impl Into>, + ) -> &mut Self { + self.source = Some(source.into()); + self + } + + /// Set the file, line and column. + pub(crate) fn with_location( + &mut self, + mut name: String, + line: usize, + col: usize, + ) -> &mut Self { + write!(name, ":{}:{}", line, col) + .expect("Writing to a string cannot fail"); + self.filename = Some(name); + self.line = Some(line); + self + } +} + +pub(crate) fn get_line( + filename: &str, + line: usize, +) -> io::Result> { + let file = BufReader::new(File::open(filename)?); + if let Some(line) = file.lines().nth(line.wrapping_sub(1)) { + return line.map(Some); + } + + Ok(None) +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/extra_assertions.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/extra_assertions.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/extra_assertions.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/extra_assertions.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,34 @@ +//! Macros for defining extra assertions that should only be checked in testing +//! and/or CI when the `testing_only_extra_assertions` feature is enabled. + +/// Simple macro that forwards to assert! 
when using +/// testing_only_extra_assertions. +#[macro_export] +macro_rules! extra_assert { + ( $cond:expr ) => { + if cfg!(feature = "testing_only_extra_assertions") { + assert!($cond); + } + }; + ( $cond:expr , $( $arg:tt )+ ) => { + if cfg!(feature = "testing_only_extra_assertions") { + assert!($cond, $( $arg )* ) + } + }; +} + +/// Simple macro that forwards to assert_eq! when using +/// testing_only_extra_assertions. +#[macro_export] +macro_rules! extra_assert_eq { + ( $lhs:expr , $rhs:expr ) => { + if cfg!(feature = "testing_only_extra_assertions") { + assert_eq!($lhs, $rhs); + } + }; + ( $lhs:expr , $rhs:expr , $( $arg:tt )+ ) => { + if cfg!(feature = "testing_only_extra_assertions") { + assert!($lhs, $rhs, $( $arg )* ); + } + }; +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/features.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/features.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/features.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/features.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,323 @@ +//! Contains code for selecting features + +#![deny(unused_extern_crates)] +#![deny(clippy::missing_docs_in_private_items)] +#![allow(deprecated)] + +use std::io; +use std::str::FromStr; + +/// Define RustTarget struct definition, Default impl, and conversions +/// between RustTarget and String. +macro_rules! rust_target_def { + ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => { + /// Represents the version of the Rust language to target. + /// + /// To support a beta release, use the corresponding stable release. + /// + /// This enum will have more variants added as necessary. 
+ #[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Hash)] + #[allow(non_camel_case_types)] + pub enum RustTarget { + $( + $( + #[$attr] + )* + $release, + )* + } + + impl Default for RustTarget { + /// Gives the latest stable Rust version + fn default() -> RustTarget { + LATEST_STABLE_RUST + } + } + + impl FromStr for RustTarget { + type Err = io::Error; + + /// Create a `RustTarget` from a string. + /// + /// * The stable/beta versions of Rust are of the form "1.0", + /// "1.19", etc. + /// * The nightly version should be specified with "nightly". + fn from_str(s: &str) -> Result { + match s.as_ref() { + $( + stringify!($value) => Ok(RustTarget::$release), + )* + _ => Err( + io::Error::new( + io::ErrorKind::InvalidInput, + concat!( + "Got an invalid rust target. Accepted values ", + "are of the form ", + "\"1.0\" or \"nightly\"."))), + } + } + } + + impl From for String { + fn from(target: RustTarget) -> Self { + match target { + $( + RustTarget::$release => stringify!($value), + )* + }.into() + } + } + } +} + +/// Defines an array slice with all RustTarget values +macro_rules! rust_target_values_def { + ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => { + /// Strings of allowed `RustTarget` values + pub static RUST_TARGET_STRINGS: &'static [&str] = &[ + $( + stringify!($value), + )* + ]; + } +} + +/// Defines macro which takes a macro +macro_rules! rust_target_base { + ( $x_macro:ident ) => { + $x_macro!( + /// Rust stable 1.0 + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_0 => 1.0; + /// Rust stable 1.17 + /// * Static lifetime elision ([RFC 1623](https://github.com/rust-lang/rfcs/blob/master/text/1623-static.md)) + #[deprecated = "This rust target is deprecated. 
If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_17 => 1.17; + /// Rust stable 1.19 + /// * Untagged unions ([RFC 1444](https://github.com/rust-lang/rfcs/blob/master/text/1444-union.md)) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_19 => 1.19; + /// Rust stable 1.20 + /// * Associated constants ([PR](https://github.com/rust-lang/rust/pull/42809)) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_20 => 1.20; + /// Rust stable 1.21 + /// * Builtin impls for `Clone` ([PR](https://github.com/rust-lang/rust/pull/43690)) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_21 => 1.21; + /// Rust stable 1.25 + /// * `repr(align)` ([PR](https://github.com/rust-lang/rust/pull/47006)) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_25 => 1.25; + /// Rust stable 1.26 + /// * [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_26 => 1.26; + /// Rust stable 1.27 + /// * `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925)) + #[deprecated = "This rust target is deprecated. 
If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_27 => 1.27; + /// Rust stable 1.28 + /// * `repr(transparent)` ([PR](https://github.com/rust-lang/rust/pull/51562)) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_28 => 1.28; + /// Rust stable 1.30 + /// * `const fn` support for limited cases ([PR](https://github.com/rust-lang/rust/pull/54835/) + /// * [c_void available in core](https://doc.rust-lang.org/core/ffi/enum.c_void.html) + #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_30 => 1.30; + /// Rust stable 1.33 + /// * repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049)) + => Stable_1_33 => 1.33; + /// Rust stable 1.36 + /// * `MaybeUninit` instead of `mem::uninitialized()` ([PR](https://github.com/rust-lang/rust/pull/60445)) + => Stable_1_36 => 1.36; + /// Rust stable 1.40 + /// * `non_exhaustive` enums/structs ([Tracking issue](https://github.com/rust-lang/rust/issues/44109)) + => Stable_1_40 => 1.40; + /// Rust stable 1.47 + /// * `larger_arrays` ([Tracking issue](https://github.com/rust-lang/rust/pull/74060)) + => Stable_1_47 => 1.47; + /// Rust stable 1.64 + /// * `core_ffi_c` ([Tracking issue](https://github.com/rust-lang/rust/issues/94501)) + => Stable_1_64 => 1.64; + /// Rust stable 1.68 + /// * `abi_efiapi` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/65815)) + => Stable_1_68 => 1.68; + /// Nightly rust + /// * `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202)) + /// * `vectorcall` calling convention (no tracking issue) + /// * `c_unwind` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/74990)) + => 
Nightly => nightly; + ); + } +} + +rust_target_base!(rust_target_def); +rust_target_base!(rust_target_values_def); + +/// Latest stable release of Rust +pub const LATEST_STABLE_RUST: RustTarget = RustTarget::Stable_1_68; + +/// Create RustFeatures struct definition, new(), and a getter for each field +macro_rules! rust_feature_def { + ( + $( $rust_target:ident { + $( $( #[$attr:meta] )* => $feature:ident; )* + } )* + ) => { + /// Features supported by a rust target + #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] + #[allow(missing_docs)] // Documentation should go into the relevant variants. + pub(crate) struct RustFeatures { + $( $( + $( + #[$attr] + )* + pub $feature: bool, + )* )* + } + + impl RustFeatures { + /// Gives a RustFeatures struct with all features disabled + fn new() -> Self { + RustFeatures { + $( $( + $feature: false, + )* )* + } + } + } + + impl From for RustFeatures { + fn from(rust_target: RustTarget) -> Self { + let mut features = RustFeatures::new(); + + $( + if rust_target >= RustTarget::$rust_target { + $( + features.$feature = true; + )* + } + )* + + features + } + } + } +} + +// NOTE(emilio): When adding or removing features here, make sure to update the +// documentation for the relevant variant in the rust_target_base macro +// definition. 
+rust_feature_def!( + Stable_1_17 { + => static_lifetime_elision; + } + Stable_1_19 { + => untagged_union; + } + Stable_1_20 { + => associated_const; + } + Stable_1_21 { + => builtin_clone_impls; + } + Stable_1_25 { + => repr_align; + } + Stable_1_26 { + => i128_and_u128; + } + Stable_1_27 { + => must_use_function; + } + Stable_1_28 { + => repr_transparent; + } + Stable_1_30 { + => min_const_fn; + => core_ffi_c_void; + } + Stable_1_33 { + => repr_packed_n; + } + Stable_1_36 { + => maybe_uninit; + } + Stable_1_40 { + => non_exhaustive; + } + Stable_1_47 { + => larger_arrays; + } + Stable_1_64 { + => core_ffi_c; + } + Stable_1_68 { + => abi_efiapi; + } + Nightly { + => thiscall_abi; + => vectorcall_abi; + => c_unwind_abi; + } +); + +impl Default for RustFeatures { + fn default() -> Self { + let default_rust_target: RustTarget = Default::default(); + Self::from(default_rust_target) + } +} + +#[cfg(test)] +mod test { + #![allow(unused_imports)] + use super::*; + + #[test] + fn target_features() { + let f_1_0 = RustFeatures::from(RustTarget::Stable_1_0); + assert!( + !f_1_0.static_lifetime_elision && + !f_1_0.core_ffi_c_void && + !f_1_0.untagged_union && + !f_1_0.associated_const && + !f_1_0.builtin_clone_impls && + !f_1_0.repr_align && + !f_1_0.thiscall_abi && + !f_1_0.vectorcall_abi + ); + let f_1_21 = RustFeatures::from(RustTarget::Stable_1_21); + assert!( + f_1_21.static_lifetime_elision && + !f_1_21.core_ffi_c_void && + f_1_21.untagged_union && + f_1_21.associated_const && + f_1_21.builtin_clone_impls && + !f_1_21.repr_align && + !f_1_21.thiscall_abi && + !f_1_21.vectorcall_abi + ); + let f_nightly = RustFeatures::from(RustTarget::Nightly); + assert!( + f_nightly.static_lifetime_elision && + f_nightly.core_ffi_c_void && + f_nightly.untagged_union && + f_nightly.associated_const && + f_nightly.builtin_clone_impls && + f_nightly.maybe_uninit && + f_nightly.repr_align && + f_nightly.thiscall_abi && + f_nightly.vectorcall_abi && + f_nightly.c_unwind_abi + ); + } + + fn 
test_target(target_str: &str, target: RustTarget) { + let target_string: String = target.into(); + assert_eq!(target_str, target_string); + assert_eq!(target, RustTarget::from_str(target_str).unwrap()); + } + + #[test] + fn str_to_target() { + test_target("1.0", RustTarget::Stable_1_0); + test_target("1.17", RustTarget::Stable_1_17); + test_target("1.19", RustTarget::Stable_1_19); + test_target("1.21", RustTarget::Stable_1_21); + test_target("1.25", RustTarget::Stable_1_25); + test_target("nightly", RustTarget::Nightly); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/derive.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/derive.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/derive.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/derive.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,732 @@ +//! Determining which types for which we cannot emit `#[derive(Trait)]`. + +use std::fmt; + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::analysis::has_vtable::HasVtable; +use crate::ir::comp::CompKind; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::derive::CanDerive; +use crate::ir::function::FunctionSig; +use crate::ir::item::{IsOpaque, Item}; +use crate::ir::layout::Layout; +use crate::ir::template::TemplateParameters; +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; +use crate::ir::ty::{Type, TypeKind}; +use crate::{Entry, HashMap, HashSet}; + +/// Which trait to consider when doing the `CannotDerive` analysis. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum DeriveTrait { + /// The `Copy` trait. + Copy, + /// The `Debug` trait. + Debug, + /// The `Default` trait. + Default, + /// The `Hash` trait. + Hash, + /// The `PartialEq` and `PartialOrd` traits. 
+ PartialEqOrPartialOrd, +} + +/// An analysis that finds for each IR item whether a trait cannot be derived. +/// +/// We use the monotone constraint function `cannot_derive`, defined as follows +/// for type T: +/// +/// * If T is Opaque and the layout of the type is known, get this layout as an +/// opaquetype and check whether it can derive using trivial checks. +/// +/// * If T is Array, a trait cannot be derived if the array is incomplete, +/// if the length of the array is larger than the limit (unless the trait +/// allows it), or the trait cannot be derived for the type of data the array +/// contains. +/// +/// * If T is Vector, a trait cannot be derived if the trait cannot be derived +/// for the type of data the vector contains. +/// +/// * If T is a type alias, a templated alias or an indirection to another type, +/// the trait cannot be derived if the trait cannot be derived for type T +/// refers to. +/// +/// * If T is a compound type, the trait cannot be derived if the trait cannot +/// be derived for any of its base members or fields. +/// +/// * If T is an instantiation of an abstract template definition, the trait +/// cannot be derived if any of the template arguments or template definition +/// cannot derive the trait. +/// +/// * For all other (simple) types, compiler and standard library limitations +/// dictate whether the trait is implemented. +#[derive(Debug, Clone)] +pub(crate) struct CannotDerive<'ctx> { + ctx: &'ctx BindgenContext, + + derive_trait: DeriveTrait, + + // The incremental result of this analysis's computation. + // Contains information whether particular item can derive `derive_trait` + can_derive: HashMap, + + // Dependencies saying that if a key ItemId has been inserted into the + // `cannot_derive_partialeq_or_partialord` set, then each of the ids + // in Vec need to be considered again. 
+ // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // can derive `derive_trait`. + dependencies: HashMap>, +} + +type EdgePredicate = fn(EdgeKind) -> bool; + +fn consider_edge_default(kind: EdgeKind) -> bool { + match kind { + // These are the only edges that can affect whether a type can derive + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method | + EdgeKind::Generic => false, + } +} + +impl<'ctx> CannotDerive<'ctx> { + fn insert>( + &mut self, + id: Id, + can_derive: CanDerive, + ) -> ConstrainResult { + let id = id.into(); + trace!( + "inserting {:?} can_derive<{}>={:?}", + id, + self.derive_trait, + can_derive + ); + + if let CanDerive::Yes = can_derive { + return ConstrainResult::Same; + } + + match self.can_derive.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < can_derive { + entry.insert(can_derive); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(can_derive); + ConstrainResult::Changed + } + } + } + + fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive { + if !self.ctx.allowlisted_items().contains(&item.id()) { + let can_derive = self + .ctx + .blocklisted_type_implements_trait(item, self.derive_trait); + match can_derive { + CanDerive::Yes => trace!( + " blocklisted type explicitly implements {}", + self.derive_trait + ), + CanDerive::Manually => trace!( + " blocklisted type requires manual implementation of {}", + self.derive_trait + ), + CanDerive::No => trace!( + " cannot derive {} for blocklisted type", + 
self.derive_trait + ), + } + return can_derive; + } + + if self.derive_trait.not_by_name(self.ctx, item) { + trace!( + " cannot derive {} for explicitly excluded type", + self.derive_trait + ); + return CanDerive::No; + } + + trace!("ty: {:?}", ty); + if item.is_opaque(self.ctx, &()) { + if !self.derive_trait.can_derive_union() && + ty.is_union() && + self.ctx.options().untagged_union + { + trace!( + " cannot derive {} for Rust unions", + self.derive_trait + ); + return CanDerive::No; + } + + let layout_can_derive = + ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { + l.opaque().array_size_within_derive_limit(self.ctx) + }); + + match layout_can_derive { + CanDerive::Yes => { + trace!( + " we can trivially derive {} for the layout", + self.derive_trait + ); + } + _ => { + trace!( + " we cannot derive {} for the layout", + self.derive_trait + ); + } + }; + return layout_can_derive; + } + + match *ty.kind() { + // Handle the simple cases. These can derive traits without further + // information. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Complex(..) | + TypeKind::Float(..) | + TypeKind::Enum(..) | + TypeKind::TypeParam | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::Reference(..) | + TypeKind::ObjCInterface(..) 
| + TypeKind::ObjCId | + TypeKind::ObjCSel => { + return self.derive_trait.can_derive_simple(ty.kind()); + } + TypeKind::Pointer(inner) => { + let inner_type = + self.ctx.resolve_type(inner).canonical_type(self.ctx); + if let TypeKind::Function(ref sig) = *inner_type.kind() { + self.derive_trait.can_derive_fnptr(sig) + } else { + self.derive_trait.can_derive_pointer() + } + } + TypeKind::Function(ref sig) => { + self.derive_trait.can_derive_fnptr(sig) + } + + // Complex cases need more information + TypeKind::Array(t, len) => { + let inner_type = + self.can_derive.get(&t.into()).cloned().unwrap_or_default(); + if inner_type != CanDerive::Yes { + trace!( + " arrays of T for which we cannot derive {} \ + also cannot derive {}", + self.derive_trait, + self.derive_trait + ); + return CanDerive::No; + } + + if len == 0 && !self.derive_trait.can_derive_incomplete_array() + { + trace!( + " cannot derive {} for incomplete arrays", + self.derive_trait + ); + return CanDerive::No; + } + + if self.derive_trait.can_derive_large_array(self.ctx) { + trace!(" array can derive {}", self.derive_trait); + return CanDerive::Yes; + } + + if len > RUST_DERIVE_IN_ARRAY_LIMIT { + trace!( + " array is too large to derive {}, but it may be implemented", self.derive_trait + ); + return CanDerive::Manually; + } + trace!( + " array is small enough to derive {}", + self.derive_trait + ); + CanDerive::Yes + } + TypeKind::Vector(t, len) => { + let inner_type = + self.can_derive.get(&t.into()).cloned().unwrap_or_default(); + if inner_type != CanDerive::Yes { + trace!( + " vectors of T for which we cannot derive {} \ + also cannot derive {}", + self.derive_trait, + self.derive_trait + ); + return CanDerive::No; + } + assert_ne!(len, 0, "vectors cannot have zero length"); + self.derive_trait.can_derive_vector() + } + + TypeKind::Comp(ref info) => { + assert!( + !info.has_non_type_template_params(), + "The early ty.is_opaque check should have handled this case" + ); + + if 
!self.derive_trait.can_derive_compound_forward_decl() && + info.is_forward_declaration() + { + trace!( + " cannot derive {} for forward decls", + self.derive_trait + ); + return CanDerive::No; + } + + // NOTE: Take into account that while unions in C and C++ are copied by + // default, the may have an explicit destructor in C++, so we can't + // defer this check just for the union case. + if !self.derive_trait.can_derive_compound_with_destructor() && + self.ctx.lookup_has_destructor( + item.id().expect_type_id(self.ctx), + ) + { + trace!( + " comp has destructor which cannot derive {}", + self.derive_trait + ); + return CanDerive::No; + } + + if info.kind() == CompKind::Union { + if self.derive_trait.can_derive_union() { + if self.ctx.options().untagged_union && + // https://github.com/rust-lang/rust/issues/36640 + (!info.self_template_params(self.ctx).is_empty() || + !item.all_template_params(self.ctx).is_empty()) + { + trace!( + " cannot derive {} for Rust union because issue 36640", self.derive_trait + ); + return CanDerive::No; + } + // fall through to be same as non-union handling + } else { + if self.ctx.options().untagged_union { + trace!( + " cannot derive {} for Rust unions", + self.derive_trait + ); + return CanDerive::No; + } + + let layout_can_derive = + ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { + l.opaque() + .array_size_within_derive_limit(self.ctx) + }); + match layout_can_derive { + CanDerive::Yes => { + trace!( + " union layout can trivially derive {}", + self.derive_trait + ); + } + _ => { + trace!( + " union layout cannot derive {}", + self.derive_trait + ); + } + }; + return layout_can_derive; + } + } + + if !self.derive_trait.can_derive_compound_with_vtable() && + item.has_vtable(self.ctx) + { + trace!( + " cannot derive {} for comp with vtable", + self.derive_trait + ); + return CanDerive::No; + } + + // Bitfield units are always represented as arrays of u8, but + // they're not traced as arrays, so we need to check here + // instead. 
+ if !self.derive_trait.can_derive_large_array(self.ctx) && + info.has_too_large_bitfield_unit() && + !item.is_opaque(self.ctx, &()) + { + trace!( + " cannot derive {} for comp with too large bitfield unit", + self.derive_trait + ); + return CanDerive::No; + } + + let pred = self.derive_trait.consider_edge_comp(); + self.constrain_join(item, pred) + } + + TypeKind::ResolvedTypeRef(..) | + TypeKind::TemplateAlias(..) | + TypeKind::Alias(..) | + TypeKind::BlockPointer(..) => { + let pred = self.derive_trait.consider_edge_typeref(); + self.constrain_join(item, pred) + } + + TypeKind::TemplateInstantiation(..) => { + let pred = self.derive_trait.consider_edge_tmpl_inst(); + self.constrain_join(item, pred) + } + + TypeKind::Opaque => unreachable!( + "The early ty.is_opaque check should have handled this case" + ), + } + } + + fn constrain_join( + &mut self, + item: &Item, + consider_edge: EdgePredicate, + ) -> CanDerive { + let mut candidate = None; + + item.trace( + self.ctx, + &mut |sub_id, edge_kind| { + // Ignore ourselves, since union with ourself is a + // no-op. Ignore edges that aren't relevant to the + // analysis. 
+ if sub_id == item.id() || !consider_edge(edge_kind) { + return; + } + + let can_derive = self.can_derive + .get(&sub_id) + .cloned() + .unwrap_or_default(); + + match can_derive { + CanDerive::Yes => trace!(" member {:?} can derive {}", sub_id, self.derive_trait), + CanDerive::Manually => trace!(" member {:?} cannot derive {}, but it may be implemented", sub_id, self.derive_trait), + CanDerive::No => trace!(" member {:?} cannot derive {}", sub_id, self.derive_trait), + } + + *candidate.get_or_insert(CanDerive::Yes) |= can_derive; + }, + &(), + ); + + if candidate.is_none() { + trace!( + " can derive {} because there are no members", + self.derive_trait + ); + } + candidate.unwrap_or_default() + } +} + +impl DeriveTrait { + fn not_by_name(&self, ctx: &BindgenContext, item: &Item) -> bool { + match self { + DeriveTrait::Copy => ctx.no_copy_by_name(item), + DeriveTrait::Debug => ctx.no_debug_by_name(item), + DeriveTrait::Default => ctx.no_default_by_name(item), + DeriveTrait::Hash => ctx.no_hash_by_name(item), + DeriveTrait::PartialEqOrPartialOrd => { + ctx.no_partialeq_by_name(item) + } + } + } + + fn consider_edge_comp(&self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field), + } + } + + fn consider_edge_typeref(&self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| kind == EdgeKind::TypeReference, + } + } + + fn consider_edge_tmpl_inst(&self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| { + matches!( + kind, + EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration + ) + }, + } + } + + fn can_derive_large_array(&self, ctx: &BindgenContext) -> bool { + if ctx.options().rust_features().larger_arrays { + !matches!(self, DeriveTrait::Default) + } else { + matches!(self, DeriveTrait::Copy) + } + } + + fn 
can_derive_union(&self) -> bool { + matches!(self, DeriveTrait::Copy) + } + + fn can_derive_compound_with_destructor(&self) -> bool { + !matches!(self, DeriveTrait::Copy) + } + + fn can_derive_compound_with_vtable(&self) -> bool { + !matches!(self, DeriveTrait::Default) + } + + fn can_derive_compound_forward_decl(&self) -> bool { + matches!(self, DeriveTrait::Copy | DeriveTrait::Debug) + } + + fn can_derive_incomplete_array(&self) -> bool { + !matches!( + self, + DeriveTrait::Copy | + DeriveTrait::Hash | + DeriveTrait::PartialEqOrPartialOrd + ) + } + + fn can_derive_fnptr(&self, f: &FunctionSig) -> CanDerive { + match (self, f.function_pointers_can_derive()) { + (DeriveTrait::Copy, _) | (DeriveTrait::Default, _) | (_, true) => { + trace!(" function pointer can derive {}", self); + CanDerive::Yes + } + (DeriveTrait::Debug, false) => { + trace!(" function pointer cannot derive {}, but it may be implemented", self); + CanDerive::Manually + } + (_, false) => { + trace!(" function pointer cannot derive {}", self); + CanDerive::No + } + } + } + + fn can_derive_vector(&self) -> CanDerive { + match self { + DeriveTrait::PartialEqOrPartialOrd => { + // FIXME: vectors always can derive PartialEq, but they should + // not derive PartialOrd: + // https://github.com/rust-lang-nursery/packed_simd/issues/48 + trace!(" vectors cannot derive PartialOrd"); + CanDerive::No + } + _ => { + trace!(" vector can derive {}", self); + CanDerive::Yes + } + } + } + + fn can_derive_pointer(&self) -> CanDerive { + match self { + DeriveTrait::Default => { + trace!(" pointer cannot derive Default"); + CanDerive::No + } + _ => { + trace!(" pointer can derive {}", self); + CanDerive::Yes + } + } + } + + fn can_derive_simple(&self, kind: &TypeKind) -> CanDerive { + match (self, kind) { + // === Default === + (DeriveTrait::Default, TypeKind::Void) | + (DeriveTrait::Default, TypeKind::NullPtr) | + (DeriveTrait::Default, TypeKind::Enum(..)) | + (DeriveTrait::Default, TypeKind::Reference(..)) | + 
(DeriveTrait::Default, TypeKind::TypeParam) | + (DeriveTrait::Default, TypeKind::ObjCInterface(..)) | + (DeriveTrait::Default, TypeKind::ObjCId) | + (DeriveTrait::Default, TypeKind::ObjCSel) => { + trace!(" types that always cannot derive Default"); + CanDerive::No + } + (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => { + unreachable!( + "Type with unresolved type ref can't reach derive default" + ) + } + // === Hash === + (DeriveTrait::Hash, TypeKind::Float(..)) | + (DeriveTrait::Hash, TypeKind::Complex(..)) => { + trace!(" float cannot derive Hash"); + CanDerive::No + } + // === others === + _ => { + trace!(" simple type that can always derive {}", self); + CanDerive::Yes + } + } + } +} + +impl fmt::Display for DeriveTrait { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let s = match self { + DeriveTrait::Copy => "Copy", + DeriveTrait::Debug => "Debug", + DeriveTrait::Default => "Default", + DeriveTrait::Hash => "Hash", + DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd", + }; + s.fmt(f) + } +} + +impl<'ctx> MonotoneFramework for CannotDerive<'ctx> { + type Node = ItemId; + type Extra = (&'ctx BindgenContext, DeriveTrait); + type Output = HashMap; + + fn new( + (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait), + ) -> CannotDerive<'ctx> { + let can_derive = HashMap::default(); + let dependencies = generate_dependencies(ctx, consider_edge_default); + + CannotDerive { + ctx, + derive_trait, + can_derive, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + // The transitive closure of all allowlisted items, including explicitly + // blocklisted items. 
+ self.ctx + .allowlisted_items() + .iter() + .cloned() + .flat_map(|i| { + let mut reachable = vec![i]; + i.trace( + self.ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {:?}", id); + + if let Some(CanDerive::No) = self.can_derive.get(&id).cloned() { + trace!(" already know it cannot derive {}", self.derive_trait); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let can_derive = match item.as_type() { + Some(ty) => { + let mut can_derive = self.constrain_type(item, ty); + if let CanDerive::Yes = can_derive { + let is_reached_limit = + |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT; + if !self.derive_trait.can_derive_large_array(self.ctx) && + ty.layout(self.ctx).map_or(false, is_reached_limit) + { + // We have to be conservative: the struct *could* have enough + // padding that we emit an array that is longer than + // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations + // into the IR and computed them before this analysis, then we could + // be precise rather than conservative here. + can_derive = CanDerive::Manually; + } + } + can_derive + } + None => self.constrain_join(item, consider_edge_default), + }; + + self.insert(id, can_derive) + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: CannotDerive<'ctx>) -> Self { + extra_assert!(analysis + .can_derive + .values() + .all(|v| *v != CanDerive::Yes)); + + analysis.can_derive + } +} + +/// Convert a `HashMap` into a `HashSet`. +/// +/// Elements that are not `CanDerive::Yes` are kept in the set, so that it +/// represents all items that cannot derive. 
+pub(crate) fn as_cannot_derive_set( + can_derive: HashMap, +) -> HashSet { + can_derive + .into_iter() + .filter_map(|(k, v)| if v != CanDerive::Yes { Some(k) } else { None }) + .collect() +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_destructor.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_destructor.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_destructor.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_destructor.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,176 @@ +//! Determining which types have destructors + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::{CompKind, Field, FieldMethods}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has a destructor or not +/// +/// We use the monotone function `has destructor`, defined as follows: +/// +/// * If T is a type alias, a templated alias, or an indirection to another type, +/// T has a destructor if the type T refers to has a destructor. +/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it, +/// or if it's a struct, T has a destructor if any of its base members has a destructor, +/// or if any of its fields have a destructor. +/// * If T is an instantiation of an abstract template definition, T has +/// a destructor if its template definition has a destructor, +/// or if any of the template arguments has a destructor. +/// * If T is the type of a field, that field has a destructor if it's not a bitfield, +/// and if T has a destructor. +#[derive(Debug, Clone)] +pub(crate) struct HasDestructorAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. 
Everything in this + // set definitely has a destructor. + have_destructor: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `have_destructor` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has a destructor or not. + dependencies: HashMap>, +} + +impl<'ctx> HasDestructorAnalysis<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type has a + // destructor or not. + matches!( + kind, + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration + ) + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + let was_not_already_in_set = self.have_destructor.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {:?} twice because if it was \ + already in the set, `constrain` should have exited early.", + id + ); + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> Self { + let have_destructor = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasDestructorAnalysis { + ctx, + have_destructor, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().cloned().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + if self.have_destructor.contains(&id) { + // We've already computed that this type has a destructor and that can't + // change. 
+ return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + None => return ConstrainResult::Same, + Some(ty) => ty, + }; + + match *ty.kind() { + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::ResolvedTypeRef(t) => { + if self.have_destructor.contains(&t.into()) { + self.insert(id) + } else { + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + if info.has_own_destructor() { + return self.insert(id); + } + + match info.kind() { + CompKind::Union => ConstrainResult::Same, + CompKind::Struct => { + let base_or_field_destructor = + info.base_members().iter().any(|base| { + self.have_destructor.contains(&base.ty.into()) + }) || info.fields().iter().any( + |field| match *field { + Field::DataMember(ref data) => self + .have_destructor + .contains(&data.ty().into()), + Field::Bitfields(_) => false, + }, + ); + if base_or_field_destructor { + self.insert(id) + } else { + ConstrainResult::Same + } + } + } + } + + TypeKind::TemplateInstantiation(ref inst) => { + let definition_or_arg_destructor = self + .have_destructor + .contains(&inst.template_definition().into()) || + inst.template_arguments().iter().any(|arg| { + self.have_destructor.contains(&arg.into()) + }); + if definition_or_arg_destructor { + self.insert(id) + } else { + ConstrainResult::Same + } + } + + _ => ConstrainResult::Same, + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self { + analysis.have_destructor + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_float.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_float.rs --- 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_float.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_float.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,252 @@ +//! Determining which types has float. + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::Field; +use crate::ir::comp::FieldMethods; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has float or not. +/// +/// We use the monotone constraint function `has_float`, +/// defined as follows: +/// +/// * If T is float or complex float, T trivially has. +/// * If T is a type alias, a templated alias or an indirection to another type, +/// it has float if the type T refers to has. +/// * If T is a compound type, it has float if any of base memter or field +/// has. +/// * If T is an instantiation of an abstract template definition, T has +/// float if any of the template arguments or template definition +/// has. +#[derive(Debug, Clone)] +pub(crate) struct HasFloat<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set has float. + has_float: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `has_float` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has float or not. 
+ dependencies: HashMap>, +} + +impl<'ctx> HasFloat<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method => false, + EdgeKind::Generic => false, + } + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + trace!("inserting {:?} into the has_float set", id); + + let was_not_already_in_set = self.has_float.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {:?} twice because if it was \ + already in the set, `constrain` should have exited early.", + id + ); + + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasFloat<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> { + let has_float = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasFloat { + ctx, + has_float, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().cloned().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {:?}", id); + + if self.has_float.contains(&id) { + trace!(" already know it do not have float"); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + Some(ty) => ty, + None => { + trace!(" not a type; ignoring"); + return ConstrainResult::Same; + } + }; + + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) 
| + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Pointer(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel => { + trace!(" simple type that do not have float"); + ConstrainResult::Same + } + + TypeKind::Float(..) | TypeKind::Complex(..) => { + trace!(" float type has float"); + self.insert(id) + } + + TypeKind::Array(t, _) => { + if self.has_float.contains(&t.into()) { + trace!( + " Array with type T that has float also has float" + ); + return self.insert(id); + } + trace!(" Array with type T that do not have float also do not have float"); + ConstrainResult::Same + } + TypeKind::Vector(t, _) => { + if self.has_float.contains(&t.into()) { + trace!( + " Vector with type T that has float also has float" + ); + return self.insert(id); + } + trace!(" Vector with type T that do not have float also do not have float"); + ConstrainResult::Same + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + if self.has_float.contains(&t.into()) { + trace!( + " aliases and type refs to T which have float \ + also have float" + ); + self.insert(id) + } else { + trace!(" aliases and type refs to T which do not have float \ + also do not have floaarrayt"); + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + let bases_have = info + .base_members() + .iter() + .any(|base| self.has_float.contains(&base.ty.into())); + if bases_have { + trace!(" bases have float, so we also have"); + return self.insert(id); + } + let fields_have = info.fields().iter().any(|f| match *f { + Field::DataMember(ref data) => { + self.has_float.contains(&data.ty().into()) + } + Field::Bitfields(ref bfu) => bfu + .bitfields() + .iter() + .any(|b| self.has_float.contains(&b.ty().into())), + }); + if fields_have { + trace!(" fields have float, so we also have"); + return self.insert(id); + } + + trace!(" comp doesn't have float"); + 
ConstrainResult::Same + } + + TypeKind::TemplateInstantiation(ref template) => { + let args_have = template + .template_arguments() + .iter() + .any(|arg| self.has_float.contains(&arg.into())); + if args_have { + trace!( + " template args have float, so \ + insantiation also has float" + ); + return self.insert(id); + } + + let def_has = self + .has_float + .contains(&template.template_definition().into()); + if def_has { + trace!( + " template definition has float, so \ + insantiation also has" + ); + return self.insert(id); + } + + trace!(" template instantiation do not have float"); + ConstrainResult::Same + } + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasFloat<'ctx>) -> Self { + analysis.has_float + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_type_param_in_array.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_type_param_in_array.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_type_param_in_array.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_type_param_in_array.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,252 @@ +//! Determining which types has typed parameters in array. + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::Field; +use crate::ir::comp::FieldMethods; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has array or not. 
+/// +/// We use the monotone constraint function `has_type_parameter_in_array`, +/// defined as follows: +/// +/// * If T is Array type with type parameter, T trivially has. +/// * If T is a type alias, a templated alias or an indirection to another type, +/// it has type parameter in array if the type T refers to has. +/// * If T is a compound type, it has array if any of base memter or field +/// has type paramter in array. +/// * If T is an instantiation of an abstract template definition, T has +/// type parameter in array if any of the template arguments or template definition +/// has. +#[derive(Debug, Clone)] +pub(crate) struct HasTypeParameterInArray<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set has array. + has_type_parameter_in_array: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `has_type_parameter_in_array` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has array or not. + dependencies: HashMap>, +} + +impl<'ctx> HasTypeParameterInArray<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + // These are the only edges that can affect whether a type has type parameter + // in array or not. 
+ EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method => false, + EdgeKind::Generic => false, + } + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + trace!( + "inserting {:?} into the has_type_parameter_in_array set", + id + ); + + let was_not_already_in_set = + self.has_type_parameter_in_array.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {:?} twice because if it was \ + already in the set, `constrain` should have exited early.", + id + ); + + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> { + let has_type_parameter_in_array = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasTypeParameterInArray { + ctx, + has_type_parameter_in_array, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().cloned().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {:?}", id); + + if self.has_type_parameter_in_array.contains(&id) { + trace!(" already know it do not have array"); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + Some(ty) => ty, + None => { + trace!(" not a type; ignoring"); + return ConstrainResult::Same; + } + }; + + match *ty.kind() { + // Handle the simple cases. These cannot have array in type parameter + // without further information. 
+ TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Vector(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Pointer(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel => { + trace!(" simple type that do not have array"); + ConstrainResult::Same + } + + TypeKind::Array(t, _) => { + let inner_ty = + self.ctx.resolve_type(t).canonical_type(self.ctx); + match *inner_ty.kind() { + TypeKind::TypeParam => { + trace!(" Array with Named type has type parameter"); + self.insert(id) + } + _ => { + trace!( + " Array without Named type does have type parameter" + ); + ConstrainResult::Same + } + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + if self.has_type_parameter_in_array.contains(&t.into()) { + trace!( + " aliases and type refs to T which have array \ + also have array" + ); + self.insert(id) + } else { + trace!( + " aliases and type refs to T which do not have array \ + also do not have array" + ); + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + let bases_have = info.base_members().iter().any(|base| { + self.has_type_parameter_in_array.contains(&base.ty.into()) + }); + if bases_have { + trace!(" bases have array, so we also have"); + return self.insert(id); + } + let fields_have = info.fields().iter().any(|f| match *f { + Field::DataMember(ref data) => self + .has_type_parameter_in_array + .contains(&data.ty().into()), + Field::Bitfields(..) 
=> false, + }); + if fields_have { + trace!(" fields have array, so we also have"); + return self.insert(id); + } + + trace!(" comp doesn't have array"); + ConstrainResult::Same + } + + TypeKind::TemplateInstantiation(ref template) => { + let args_have = + template.template_arguments().iter().any(|arg| { + self.has_type_parameter_in_array.contains(&arg.into()) + }); + if args_have { + trace!( + " template args have array, so \ + insantiation also has array" + ); + return self.insert(id); + } + + let def_has = self + .has_type_parameter_in_array + .contains(&template.template_definition().into()); + if def_has { + trace!( + " template definition has array, so \ + insantiation also has" + ); + return self.insert(id); + } + + trace!(" template instantiation do not have array"); + ConstrainResult::Same + } + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self { + analysis.has_type_parameter_in_array + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_vtable.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_vtable.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_vtable.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/has_vtable.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,240 @@ +//! Determining which types has vtable + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{Entry, HashMap}; +use std::cmp; +use std::ops; + +/// The result of the `HasVtableAnalysis` for an individual item. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) enum HasVtableResult { + /// The item does not have a vtable pointer. + No, + + /// The item has a vtable and the actual vtable pointer is within this item. + SelfHasVtable, + + /// The item has a vtable, but the actual vtable pointer is in a base + /// member. + BaseHasVtable, +} + +impl Default for HasVtableResult { + fn default() -> Self { + HasVtableResult::No + } +} + +impl HasVtableResult { + /// Take the least upper bound of `self` and `rhs`. + pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for HasVtableResult { + type Output = Self; + + fn bitor(self, rhs: HasVtableResult) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for HasVtableResult { + fn bitor_assign(&mut self, rhs: HasVtableResult) { + *self = self.join(rhs) + } +} + +/// An analysis that finds for each IR item whether it has vtable or not +/// +/// We use the monotone function `has vtable`, defined as follows: +/// +/// * If T is a type alias, a templated alias, an indirection to another type, +/// or a reference of a type, T has vtable if the type T refers to has vtable. +/// * If T is a compound type, T has vtable if we saw a virtual function when +/// parsing it or any of its base member has vtable. +/// * If T is an instantiation of an abstract template definition, T has +/// vtable if template definition has vtable +#[derive(Debug, Clone)] +pub(crate) struct HasVtableAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set definitely has a vtable. + have_vtable: HashMap, + + // Dependencies saying that if a key ItemId has been inserted into the + // `have_vtable` set, then each of the ids in Vec need to be + // considered again. 
+ // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has a vtable or not. + dependencies: HashMap>, +} + +impl<'ctx> HasVtableAnalysis<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type has a + // vtable or not. + matches!( + kind, + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::TemplateDeclaration + ) + } + + fn insert>( + &mut self, + id: Id, + result: HasVtableResult, + ) -> ConstrainResult { + if let HasVtableResult::No = result { + return ConstrainResult::Same; + } + + let id = id.into(); + match self.have_vtable.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < result { + entry.insert(result); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(result); + ConstrainResult::Changed + } + } + } + + fn forward(&mut self, from: Id1, to: Id2) -> ConstrainResult + where + Id1: Into, + Id2: Into, + { + let from = from.into(); + let to = to.into(); + + match self.have_vtable.get(&from).cloned() { + None => ConstrainResult::Same, + Some(r) => self.insert(to, r), + } + } +} + +impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> { + let have_vtable = HashMap::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasVtableAnalysis { + ctx, + have_vtable, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().cloned().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain {:?}", id); + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + None => return ConstrainResult::Same, + Some(ty) => ty, + }; + + // TODO #851: 
figure out a way to handle deriving from template type parameters. + match *ty.kind() { + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::ResolvedTypeRef(t) | + TypeKind::Reference(t) => { + trace!( + " aliases and references forward to their inner type" + ); + self.forward(t, id) + } + + TypeKind::Comp(ref info) => { + trace!(" comp considers its own methods and bases"); + let mut result = HasVtableResult::No; + + if info.has_own_virtual_method() { + trace!(" comp has its own virtual method"); + result |= HasVtableResult::SelfHasVtable; + } + + let bases_has_vtable = info.base_members().iter().any(|base| { + trace!(" comp has a base with a vtable: {:?}", base); + self.have_vtable.contains_key(&base.ty.into()) + }); + if bases_has_vtable { + result |= HasVtableResult::BaseHasVtable; + } + + self.insert(id, result) + } + + TypeKind::TemplateInstantiation(ref inst) => { + self.forward(inst.template_definition(), id) + } + + _ => ConstrainResult::Same, + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: HasVtableAnalysis<'ctx>) -> Self { + // We let the lack of an entry mean "No" to save space. + extra_assert!(analysis + .have_vtable + .values() + .all(|v| { *v != HasVtableResult::No })); + + analysis.have_vtable + } +} + +/// A convenience trait for the things for which we might wonder if they have a +/// vtable during codegen. +/// +/// This is not for _computing_ whether the thing has a vtable, it is for +/// looking up the results of the HasVtableAnalysis's computations for a +/// specific thing. +pub(crate) trait HasVtable { + /// Return `true` if this thing has vtable, `false` otherwise. 
+ fn has_vtable(&self, ctx: &BindgenContext) -> bool; + + /// Return `true` if this thing has an actual vtable pointer in itself, as + /// opposed to transitively in a base member. + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool; +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/mod.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,407 @@ +//! Fix-point analyses on the IR using the "monotone framework". +//! +//! A lattice is a set with a partial ordering between elements, where there is +//! a single least upper bound and a single greatest least bound for every +//! subset. We are dealing with finite lattices, which means that it has a +//! finite number of elements, and it follows that there exists a single top and +//! a single bottom member of the lattice. For example, the power set of a +//! finite set forms a finite lattice where partial ordering is defined by set +//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite +//! lattice constructed from the set {0,1,2}: +//! +//! ```text +//! .----- Top = {0,1,2} -----. +//! / | \ +//! / | \ +//! / | \ +//! {0,1} -------. {0,2} .--------- {1,2} +//! | \ / \ / | +//! | / \ | +//! | / \ / \ | +//! {0} --------' {1} `---------- {2} +//! \ | / +//! \ | / +//! \ | / +//! `------ Bottom = {} ------' +//! ``` +//! +//! A monotone function `f` is a function where if `x <= y`, then it holds that +//! `f(x) <= f(y)`. It should be clear that running a monotone function to a +//! fix-point on a finite lattice will always terminate: `f` can only "move" +//! along the lattice in a single direction, and therefore can only either find +//! 
a fix-point in the middle of the lattice or continue to the top or bottom +//! depending if it is ascending or descending the lattice respectively. +//! +//! For a deeper introduction to the general form of this kind of analysis, see +//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa]. +//! +//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf + +// Re-export individual analyses. +mod template_params; +pub(crate) use self::template_params::UsedTemplateParameters; +mod derive; +pub use self::derive::DeriveTrait; +pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive}; +mod has_vtable; +pub(crate) use self::has_vtable::{ + HasVtable, HasVtableAnalysis, HasVtableResult, +}; +mod has_destructor; +pub(crate) use self::has_destructor::HasDestructorAnalysis; +mod has_type_param_in_array; +pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray; +mod has_float; +pub(crate) use self::has_float::HasFloat; +mod sizedness; +pub(crate) use self::sizedness::{ + Sizedness, SizednessAnalysis, SizednessResult, +}; + +use crate::ir::context::{BindgenContext, ItemId}; + +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::HashMap; +use std::fmt; +use std::ops; + +/// An analysis in the monotone framework. +/// +/// Implementors of this trait must maintain the following two invariants: +/// +/// 1. The concrete data must be a member of a finite-height lattice. +/// 2. The concrete `constrain` method must be monotone: that is, +/// if `x <= y`, then `constrain(x) <= constrain(y)`. +/// +/// If these invariants do not hold, iteration to a fix-point might never +/// complete. +/// +/// For a simple example analysis, see the `ReachableFrom` type in the `tests` +/// module below. +pub(crate) trait MonotoneFramework: Sized + fmt::Debug { + /// The type of node in our dependency graph. + /// + /// This is just generic (and not `ItemId`) so that we can easily unit test + /// without constructing real `Item`s and their `ItemId`s. 
+ type Node: Copy; + + /// Any extra data that is needed during computation. + /// + /// Again, this is just generic (and not `&BindgenContext`) so that we can + /// easily unit test without constructing real `BindgenContext`s full of + /// real `Item`s and real `ItemId`s. + type Extra: Sized; + + /// The final output of this analysis. Once we have reached a fix-point, we + /// convert `self` into this type, and return it as the final result of the + /// analysis. + type Output: From + fmt::Debug; + + /// Construct a new instance of this analysis. + fn new(extra: Self::Extra) -> Self; + + /// Get the initial set of nodes from which to start the analysis. Unless + /// you are sure of some domain-specific knowledge, this should be the + /// complete set of nodes. + fn initial_worklist(&self) -> Vec; + + /// Update the analysis for the given node. + /// + /// If this results in changing our internal state (ie, we discovered that + /// we have not reached a fix-point and iteration should continue), return + /// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`. + /// When `constrain` returns `ConstrainResult::Same` for all nodes in the + /// set, we have reached a fix-point and the analysis is complete. + fn constrain(&mut self, node: Self::Node) -> ConstrainResult; + + /// For each node `d` that depends on the given `node`'s current answer when + /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to + /// queue up in the worklist when `constrain(node)` reports updated + /// information. + fn each_depending_on(&self, node: Self::Node, f: F) + where + F: FnMut(Self::Node); +} + +/// Whether an analysis's `constrain` function modified the incremental results +/// or not. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ConstrainResult { + /// The incremental results were updated, and the fix-point computation + /// should continue. + Changed, + + /// The incremental results were not updated. 
+ Same, +} + +impl Default for ConstrainResult { + fn default() -> Self { + ConstrainResult::Same + } +} + +impl ops::BitOr for ConstrainResult { + type Output = Self; + + fn bitor(self, rhs: ConstrainResult) -> Self::Output { + if self == ConstrainResult::Changed || rhs == ConstrainResult::Changed { + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } +} + +impl ops::BitOrAssign for ConstrainResult { + fn bitor_assign(&mut self, rhs: ConstrainResult) { + *self = *self | rhs; + } +} + +/// Run an analysis in the monotone framework. +pub(crate) fn analyze(extra: Analysis::Extra) -> Analysis::Output +where + Analysis: MonotoneFramework, +{ + let mut analysis = Analysis::new(extra); + let mut worklist = analysis.initial_worklist(); + + while let Some(node) = worklist.pop() { + if let ConstrainResult::Changed = analysis.constrain(node) { + analysis.each_depending_on(node, |needs_work| { + worklist.push(needs_work); + }); + } + } + + analysis.into() +} + +/// Generate the dependency map for analysis +pub(crate) fn generate_dependencies( + ctx: &BindgenContext, + consider_edge: F, +) -> HashMap> +where + F: Fn(EdgeKind) -> bool, +{ + let mut dependencies = HashMap::default(); + + for &item in ctx.allowlisted_items() { + dependencies.entry(item).or_insert_with(Vec::new); + + { + // We reverse our natural IR graph edges to find dependencies + // between nodes. + item.trace( + ctx, + &mut |sub_item: ItemId, edge_kind| { + if ctx.allowlisted_items().contains(&sub_item) && + consider_edge(edge_kind) + { + dependencies + .entry(sub_item) + .or_insert_with(Vec::new) + .push(item); + } + }, + &(), + ); + } + } + dependencies +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{HashMap, HashSet}; + + // Here we find the set of nodes that are reachable from any given + // node. This is a lattice mapping nodes to subsets of all nodes. Our join + // function is set union. 
+ // + // This is our test graph: + // + // +---+ +---+ + // | | | | + // | 1 | .----| 2 | + // | | | | | + // +---+ | +---+ + // | | ^ + // | | | + // | +---+ '------' + // '----->| | + // | 3 | + // .------| |------. + // | +---+ | + // | ^ | + // v | v + // +---+ | +---+ +---+ + // | | | | | | | + // | 4 | | | 5 |--->| 6 | + // | | | | | | | + // +---+ | +---+ +---+ + // | | | | + // | | | v + // | +---+ | +---+ + // | | | | | | + // '----->| 7 |<-----' | 8 | + // | | | | + // +---+ +---+ + // + // And here is the mapping from a node to the set of nodes that are + // reachable from it within the test graph: + // + // 1: {3,4,5,6,7,8} + // 2: {2} + // 3: {3,4,5,6,7,8} + // 4: {3,4,5,6,7,8} + // 5: {3,4,5,6,7,8} + // 6: {8} + // 7: {3,4,5,6,7,8} + // 8: {} + + #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] + struct Node(usize); + + #[derive(Clone, Debug, Default, PartialEq, Eq)] + struct Graph(HashMap>); + + impl Graph { + fn make_test_graph() -> Graph { + let mut g = Graph::default(); + g.0.insert(Node(1), vec![Node(3)]); + g.0.insert(Node(2), vec![Node(2)]); + g.0.insert(Node(3), vec![Node(4), Node(5)]); + g.0.insert(Node(4), vec![Node(7)]); + g.0.insert(Node(5), vec![Node(6), Node(7)]); + g.0.insert(Node(6), vec![Node(8)]); + g.0.insert(Node(7), vec![Node(3)]); + g.0.insert(Node(8), vec![]); + g + } + + fn reverse(&self) -> Graph { + let mut reversed = Graph::default(); + for (node, edges) in self.0.iter() { + reversed.0.entry(*node).or_insert_with(Vec::new); + for referent in edges.iter() { + reversed + .0 + .entry(*referent) + .or_insert_with(Vec::new) + .push(*node); + } + } + reversed + } + } + + #[derive(Clone, Debug, PartialEq, Eq)] + struct ReachableFrom<'a> { + reachable: HashMap>, + graph: &'a Graph, + reversed: Graph, + } + + impl<'a> MonotoneFramework for ReachableFrom<'a> { + type Node = Node; + type Extra = &'a Graph; + type Output = HashMap>; + + fn new(graph: &'a Graph) -> ReachableFrom { + let reversed = graph.reverse(); + ReachableFrom { + 
reachable: Default::default(), + graph, + reversed, + } + } + + fn initial_worklist(&self) -> Vec { + self.graph.0.keys().cloned().collect() + } + + fn constrain(&mut self, node: Node) -> ConstrainResult { + // The set of nodes reachable from a node `x` is + // + // reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ... + // + // where there exist edges from `x` to each of `s_0, s_1, ...`. + // + // Yes, what follows is a **terribly** inefficient set union + // implementation. Don't copy this code outside of this test! + + let original_size = self + .reachable + .entry(node) + .or_insert_with(HashSet::default) + .len(); + + for sub_node in self.graph.0[&node].iter() { + self.reachable.get_mut(&node).unwrap().insert(*sub_node); + + let sub_reachable = self + .reachable + .entry(*sub_node) + .or_insert_with(HashSet::default) + .clone(); + + for transitive in sub_reachable { + self.reachable.get_mut(&node).unwrap().insert(transitive); + } + } + + let new_size = self.reachable[&node].len(); + if original_size != new_size { + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + + fn each_depending_on(&self, node: Node, mut f: F) + where + F: FnMut(Node), + { + for dep in self.reversed.0[&node].iter() { + f(*dep); + } + } + } + + impl<'a> From> for HashMap> { + fn from(reachable: ReachableFrom<'a>) -> Self { + reachable.reachable + } + } + + #[test] + fn monotone() { + let g = Graph::make_test_graph(); + let reachable = analyze::(&g); + println!("reachable = {:#?}", reachable); + + fn nodes(nodes: A) -> HashSet + where + A: AsRef<[usize]>, + { + nodes.as_ref().iter().cloned().map(Node).collect() + } + + let mut expected = HashMap::default(); + expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(2), nodes([2])); + expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(6), nodes([8])); + 
expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(8), nodes([])); + println!("expected = {:#?}", expected); + + assert_eq!(reachable, expected); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/sizedness.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/sizedness.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/sizedness.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/sizedness.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,361 @@ +//! Determining the sizedness of types (as base classes and otherwise). + +use super::{ + generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework, +}; +use crate::ir::context::{BindgenContext, TypeId}; +use crate::ir::item::IsOpaque; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{Entry, HashMap}; +use std::{cmp, ops}; + +/// The result of the `Sizedness` analysis for an individual item. +/// +/// This is a chain lattice of the form: +/// +/// ```ignore +/// NonZeroSized +/// | +/// DependsOnTypeParam +/// | +/// ZeroSized +/// ``` +/// +/// We initially assume that all types are `ZeroSized` and then update our +/// understanding as we learn more about each type. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) enum SizednessResult { + /// The type is zero-sized. + /// + /// This means that if it is a C++ type, and is not being used as a base + /// member, then we must add an `_address` byte to enforce the + /// unique-address-per-distinct-object-instance rule. + ZeroSized, + + /// Whether this type is zero-sized or not depends on whether a type + /// parameter is zero-sized or not. 
+ /// + /// For example, given these definitions: + /// + /// ```c++ + /// template + /// class Flongo : public T {}; + /// + /// class Empty {}; + /// + /// class NonEmpty { int x; }; + /// ``` + /// + /// Then `Flongo` is zero-sized, and needs an `_address` byte + /// inserted, while `Flongo` is *not* zero-sized, and should *not* + /// have an `_address` byte inserted. + /// + /// We don't properly handle this situation correctly right now: + /// + DependsOnTypeParam, + + /// Has some size that is known to be greater than zero. That doesn't mean + /// it has a static size, but it is not zero sized for sure. In other words, + /// it might contain an incomplete array or some other dynamically sized + /// type. + NonZeroSized, +} + +impl Default for SizednessResult { + fn default() -> Self { + SizednessResult::ZeroSized + } +} + +impl SizednessResult { + /// Take the least upper bound of `self` and `rhs`. + pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for SizednessResult { + type Output = Self; + + fn bitor(self, rhs: SizednessResult) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for SizednessResult { + fn bitor_assign(&mut self, rhs: SizednessResult) { + *self = self.join(rhs) + } +} + +/// An analysis that computes the sizedness of all types. +/// +/// * For types with known sizes -- for example pointers, scalars, etc... -- +/// they are assigned `NonZeroSized`. +/// +/// * For compound structure types with one or more fields, they are assigned +/// `NonZeroSized`. +/// +/// * For compound structure types without any fields, the results of the bases +/// are `join`ed. +/// +/// * For type parameters, `DependsOnTypeParam` is assigned. +#[derive(Debug)] +pub(crate) struct SizednessAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + dependencies: HashMap>, + // Incremental results of the analysis. Missing entries are implicitly + // considered `ZeroSized`. 
+ sized: HashMap, +} + +impl<'ctx> SizednessAnalysis<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type is + // zero-sized or not. + matches!( + kind, + EdgeKind::TemplateArgument | + EdgeKind::TemplateParameterDefinition | + EdgeKind::TemplateDeclaration | + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::Field + ) + } + + /// Insert an incremental result, and return whether this updated our + /// knowledge of types and we should continue the analysis. + fn insert( + &mut self, + id: TypeId, + result: SizednessResult, + ) -> ConstrainResult { + trace!("inserting {:?} for {:?}", result, id); + + if let SizednessResult::ZeroSized = result { + return ConstrainResult::Same; + } + + match self.sized.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < result { + entry.insert(result); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(result); + ConstrainResult::Changed + } + } + } + + fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult { + match self.sized.get(&from).cloned() { + None => ConstrainResult::Same, + Some(r) => self.insert(to, r), + } + } +} + +impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> { + type Node = TypeId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> { + let dependencies = generate_dependencies(ctx, Self::consider_edge) + .into_iter() + .filter_map(|(id, sub_ids)| { + id.as_type_id(ctx).map(|id| { + ( + id, + sub_ids + .into_iter() + .filter_map(|s| s.as_type_id(ctx)) + .collect::>(), + ) + }) + }) + .collect(); + + let sized = HashMap::default(); + + SizednessAnalysis { + ctx, + dependencies, + sized, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx + .allowlisted_items() + .iter() + .cloned() + .filter_map(|id| id.as_type_id(self.ctx)) + .collect() + } + + fn constrain(&mut self, 
id: TypeId) -> ConstrainResult { + trace!("constrain {:?}", id); + + if let Some(SizednessResult::NonZeroSized) = + self.sized.get(&id).cloned() + { + trace!(" already know it is not zero-sized"); + return ConstrainResult::Same; + } + + if id.has_vtable_ptr(self.ctx) { + trace!(" has an explicit vtable pointer, therefore is not zero-sized"); + return self.insert(id, SizednessResult::NonZeroSized); + } + + let ty = self.ctx.resolve_type(id); + + if id.is_opaque(self.ctx, &()) { + trace!(" type is opaque; checking layout..."); + let result = + ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| { + if l.size == 0 { + trace!(" ...layout has size == 0"); + SizednessResult::ZeroSized + } else { + trace!(" ...layout has size > 0"); + SizednessResult::NonZeroSized + } + }); + return self.insert(id, result); + } + + match *ty.kind() { + TypeKind::Void => { + trace!(" void is zero-sized"); + self.insert(id, SizednessResult::ZeroSized) + } + + TypeKind::TypeParam => { + trace!( + " type params sizedness depends on what they're \ + instantiated as" + ); + self.insert(id, SizednessResult::DependsOnTypeParam) + } + + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::NullPtr | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::Pointer(..) => { + trace!(" {:?} is known not to be zero-sized", ty.kind()); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::ObjCInterface(..) 
=> { + trace!(" obj-c interfaces always have at least the `isa` pointer"); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) | + TypeKind::ResolvedTypeRef(t) => { + trace!(" aliases and type refs forward to their inner type"); + self.forward(t, id) + } + + TypeKind::TemplateInstantiation(ref inst) => { + trace!( + " template instantiations are zero-sized if their \ + definition is zero-sized" + ); + self.forward(inst.template_definition(), id) + } + + TypeKind::Array(_, 0) => { + trace!(" arrays of zero elements are zero-sized"); + self.insert(id, SizednessResult::ZeroSized) + } + TypeKind::Array(..) => { + trace!(" arrays of > 0 elements are not zero-sized"); + self.insert(id, SizednessResult::NonZeroSized) + } + TypeKind::Vector(..) => { + trace!(" vectors are not zero-sized"); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::Comp(ref info) => { + trace!(" comp considers its own fields and bases"); + + if !info.fields().is_empty() { + return self.insert(id, SizednessResult::NonZeroSized); + } + + let result = info + .base_members() + .iter() + .filter_map(|base| self.sized.get(&base.ty)) + .fold(SizednessResult::ZeroSized, |a, b| a.join(*b)); + + self.insert(id, result) + } + + TypeKind::Opaque => { + unreachable!("covered by the .is_opaque() check above") + } + + TypeKind::UnresolvedTypeRef(..) => { + unreachable!("Should have been resolved after parsing!"); + } + } + } + + fn each_depending_on(&self, id: TypeId, mut f: F) + where + F: FnMut(TypeId), + { + if let Some(edges) = self.dependencies.get(&id) { + for ty in edges { + trace!("enqueue {:?} into worklist", ty); + f(*ty); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: SizednessAnalysis<'ctx>) -> Self { + // We let the lack of an entry mean "ZeroSized" to save space. 
+ extra_assert!(analysis + .sized + .values() + .all(|v| { *v != SizednessResult::ZeroSized })); + + analysis.sized + } +} + +/// A convenience trait for querying whether some type or ID is sized. +/// +/// This is not for _computing_ whether the thing is sized, it is for looking up +/// the results of the `Sizedness` analysis's computations for a specific thing. +pub(crate) trait Sizedness { + /// Get the sizedness of this type. + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult; + + /// Is the sizedness for this type `SizednessResult::ZeroSized`? + fn is_zero_sized(&self, ctx: &BindgenContext) -> bool { + self.sizedness(ctx) == SizednessResult::ZeroSized + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/template_params.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/template_params.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/template_params.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/analysis/template_params.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,607 @@ +//! Discover which template type parameters are actually used. +//! +//! ### Why do we care? +//! +//! C++ allows ignoring template parameters, while Rust does not. Usually we can +//! blindly stick a `PhantomData` inside a generic Rust struct to make up for +//! this. That doesn't work for templated type aliases, however: +//! +//! ```C++ +//! template +//! using Fml = int; +//! ``` +//! +//! If we generate the naive Rust code for this alias, we get: +//! +//! ```ignore +//! pub(crate) type Fml = ::std::os::raw::int; +//! ``` +//! +//! And this is rejected by `rustc` due to the unused type parameter. +//! +//! (Aside: in these simple cases, `libclang` will often just give us the +//! aliased type directly, and we will never even know we were dealing with +//! aliases, let alone templated aliases. It's the more convoluted scenarios +//! 
where we get to have some fun...) +//! +//! For such problematic template aliases, we could generate a tuple whose +//! second member is a `PhantomData`. Or, if we wanted to go the extra mile, +//! we could even generate some smarter wrapper that implements `Deref`, +//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased +//! type. However, this is still lackluster: +//! +//! 1. Even with a billion conversion-trait implementations, using the generated +//! bindings is rather un-ergonomic. +//! 2. With either of these solutions, we need to keep track of which aliases +//! we've transformed like this in order to generate correct uses of the +//! wrapped type. +//! +//! Given that we have to properly track which template parameters ended up used +//! for (2), we might as well leverage that information to make ergonomic +//! bindings that don't contain any unused type parameters at all, and +//! completely avoid the pain of (1). +//! +//! ### How do we determine which template parameters are used? +//! +//! Determining which template parameters are actually used is a trickier +//! problem than it might seem at a glance. On the one hand, trivial uses are +//! easy to detect: +//! +//! ```C++ +//! template +//! class Foo { +//! T trivial_use_of_t; +//! }; +//! ``` +//! +//! It gets harder when determining if one template parameter is used depends on +//! determining if another template parameter is used. In this example, whether +//! `U` is used depends on whether `T` is used. +//! +//! ```C++ +//! template +//! class DoesntUseT { +//! int x; +//! }; +//! +//! template +//! class Fml { +//! DoesntUseT lololol; +//! }; +//! ``` +//! +//! We can express the set of used template parameters as a constraint solving +//! problem (where the set of template parameters used by a given IR item is the +//! union of its sub-item's used template parameters) and iterate to a +//! fixed-point. +//! +//! 
We use the `ir::analysis::MonotoneFramework` infrastructure for this +//! fix-point analysis, where our lattice is the mapping from each IR item to +//! the powerset of the template parameters that appear in the input C++ header, +//! our join function is set union. The set of template parameters appearing in +//! the program is finite, as is the number of IR items. We start at our +//! lattice's bottom element: every item mapping to an empty set of template +//! parameters. Our analysis only adds members to each item's set of used +//! template parameters, never removes them, so it is monotone. Because our +//! lattice is finite and our constraint function is monotone, iteration to a +//! fix-point will terminate. +//! +//! See `src/ir/analysis.rs` for more. + +use super::{ConstrainResult, MonotoneFramework}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::item::{Item, ItemSet}; +use crate::ir::template::{TemplateInstantiation, TemplateParameters}; +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item its set of template parameters that +/// it uses. 
+/// +/// We use the monotone constraint function `template_param_usage`, defined as +/// follows: +/// +/// * If `T` is a named template type parameter, it trivially uses itself: +/// +/// ```ignore +/// template_param_usage(T) = { T } +/// ``` +/// +/// * If `inst` is a template instantiation, `inst.args` are the template +/// instantiation's template arguments, `inst.def` is the template definition +/// being instantiated, and `inst.def.params` is the template definition's +/// template parameters, then the instantiation's usage is the union of each +/// of its arguments' usages *if* the corresponding template parameter is in +/// turn used by the template definition: +/// +/// ```ignore +/// template_param_usage(inst) = union( +/// template_param_usage(inst.args[i]) +/// for i in 0..length(inst.args.length) +/// if inst.def.params[i] in template_param_usage(inst.def) +/// ) +/// ``` +/// +/// * Finally, for all other IR item kinds, we use our lattice's `join` +/// operation: set union with each successor of the given item's template +/// parameter usage: +/// +/// ```ignore +/// template_param_usage(v) = +/// union(template_param_usage(w) for w in successors(v)) +/// ``` +/// +/// Note that we ignore certain edges in the graph, such as edges from a +/// template declaration to its template parameters' definitions for this +/// analysis. If we didn't, then we would mistakenly determine that ever +/// template parameter is always used. +/// +/// The final wrinkle is handling of blocklisted types. Normally, we say that +/// the set of allowlisted items is the transitive closure of items explicitly +/// called out for allowlisting, *without* any items explicitly called out as +/// blocklisted. However, for the purposes of this analysis's correctness, we +/// simplify and consider run the analysis on the full transitive closure of +/// allowlisted items. 
We do, however, treat instantiations of blocklisted items +/// specially; see `constrain_instantiation_of_blocklisted_template` and its +/// documentation for details. +#[derive(Debug, Clone)] +pub(crate) struct UsedTemplateParameters<'ctx> { + ctx: &'ctx BindgenContext, + + // The Option is only there for temporary moves out of the hash map. See the + // comments in `UsedTemplateParameters::constrain` below. + used: HashMap>, + + dependencies: HashMap>, + + // The set of allowlisted items, without any blocklisted items reachable + // from the allowlisted items which would otherwise be considered + // allowlisted as well. + allowlisted_items: HashSet, +} + +impl<'ctx> UsedTemplateParameters<'ctx> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + // For each of these kinds of edges, if the referent uses a template + // parameter, then it should be considered that the origin of the + // edge also uses the template parameter. + EdgeKind::TemplateArgument | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::VarType | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::TypeReference => true, + + // An inner var or type using a template parameter is orthogonal + // from whether we use it. See template-param-usage-{6,11}.hpp. + EdgeKind::InnerVar | EdgeKind::InnerType => false, + + // We can't emit machine code for new monomorphizations of class + // templates' methods (and don't detect explicit instantiations) so + // we must ignore template parameters that are only used by + // methods. This doesn't apply to a function type's return or + // parameter types, however, because of type aliases of function + // pointers that use template parameters, eg + // tests/headers/struct_with_typedef_template_arg.hpp + EdgeKind::Method => false, + + // If we considered these edges, we would end up mistakenly claiming + // that every template parameter always used. 
+ EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => false, + + // Since we have to be careful about which edges we consider for + // this analysis to be correct, we ignore generic edges. We also + // avoid a `_` wild card to force authors of new edge kinds to + // determine whether they need to be considered by this analysis. + EdgeKind::Generic => false, + } + } + + fn take_this_id_usage_set>( + &mut self, + this_id: Id, + ) -> ItemSet { + let this_id = this_id.into(); + self.used + .get_mut(&this_id) + .expect( + "Should have a set of used template params for every item \ + id", + ) + .take() + .expect( + "Should maintain the invariant that all used template param \ + sets are `Some` upon entry of `constrain`", + ) + } + + /// We say that blocklisted items use all of their template parameters. The + /// blocklisted type is most likely implemented explicitly by the user, + /// since it won't be in the generated bindings, and we don't know exactly + /// what they'll to with template parameters, but we can push the issue down + /// the line to them. 
+ fn constrain_instantiation_of_blocklisted_template( + &self, + this_id: ItemId, + used_by_this_id: &mut ItemSet, + instantiation: &TemplateInstantiation, + ) { + trace!( + " instantiation of blocklisted template, uses all template \ + arguments" + ); + + let args = instantiation + .template_arguments() + .iter() + .map(|a| { + a.into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self.ctx) + .id() + }) + .filter(|a| *a != this_id) + .flat_map(|a| { + self.used + .get(&a) + .expect("Should have a used entry for the template arg") + .as_ref() + .expect( + "Because a != this_id, and all used template \ + param sets other than this_id's are `Some`, \ + a's used template param set should be `Some`", + ) + .iter() + .cloned() + }); + + used_by_this_id.extend(args); + } + + /// A template instantiation's concrete template argument is only used if + /// the template definition uses the corresponding template parameter. + fn constrain_instantiation( + &self, + this_id: ItemId, + used_by_this_id: &mut ItemSet, + instantiation: &TemplateInstantiation, + ) { + trace!(" template instantiation"); + + let decl = self.ctx.resolve_type(instantiation.template_definition()); + let args = instantiation.template_arguments(); + + let params = decl.self_template_params(self.ctx); + + debug_assert!(this_id != instantiation.template_definition()); + let used_by_def = self.used + .get(&instantiation.template_definition().into()) + .expect("Should have a used entry for instantiation's template definition") + .as_ref() + .expect("And it should be Some because only this_id's set is None, and an \ + instantiation's template definition should never be the \ + instantiation itself"); + + for (arg, param) in args.iter().zip(params.iter()) { + trace!( + " instantiation's argument {:?} is used if definition's \ + parameter {:?} is used", + arg, + param + ); + + if used_by_def.contains(¶m.into()) { + trace!(" param is used by template definition"); + + let arg = arg + 
.into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self.ctx) + .id(); + + if arg == this_id { + continue; + } + + let used_by_arg = self + .used + .get(&arg) + .expect("Should have a used entry for the template arg") + .as_ref() + .expect( + "Because arg != this_id, and all used template \ + param sets other than this_id's are `Some`, \ + arg's used template param set should be \ + `Some`", + ) + .iter() + .cloned(); + used_by_this_id.extend(used_by_arg); + } + } + } + + /// The join operation on our lattice: the set union of all of this ID's + /// successors. + fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) { + trace!(" other item: join with successors' usage"); + + item.trace( + self.ctx, + &mut |sub_id, edge_kind| { + // Ignore ourselves, since union with ourself is a + // no-op. Ignore edges that aren't relevant to the + // analysis. + if sub_id == item.id() || !Self::consider_edge(edge_kind) { + return; + } + + let used_by_sub_id = self + .used + .get(&sub_id) + .expect("Should have a used set for the sub_id successor") + .as_ref() + .expect( + "Because sub_id != id, and all used template \ + param sets other than id's are `Some`, \ + sub_id's used template param set should be \ + `Some`", + ) + .iter() + .cloned(); + + trace!( + " union with {:?}'s usage: {:?}", + sub_id, + used_by_sub_id.clone().collect::>() + ); + + used_by_this_id.extend(used_by_sub_id); + }, + &(), + ); + } +} + +impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> { + let mut used = HashMap::default(); + let mut dependencies = HashMap::default(); + let allowlisted_items: HashSet<_> = + ctx.allowlisted_items().iter().cloned().collect(); + + let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items + .iter() + .cloned() + .flat_map(|i| { + let mut reachable = vec![i]; + 
i.trace( + ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect(); + + for item in allowlisted_and_blocklisted_items { + dependencies.entry(item).or_insert_with(Vec::new); + used.entry(item).or_insert_with(|| Some(ItemSet::new())); + + { + // We reverse our natural IR graph edges to find dependencies + // between nodes. + item.trace( + ctx, + &mut |sub_item: ItemId, _| { + used.entry(sub_item) + .or_insert_with(|| Some(ItemSet::new())); + dependencies + .entry(sub_item) + .or_insert_with(Vec::new) + .push(item); + }, + &(), + ); + } + + // Additionally, whether a template instantiation's template + // arguments are used depends on whether the template declaration's + // generic template parameters are used. + let item_kind = + ctx.resolve_item(item).as_type().map(|ty| ty.kind()); + if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind { + let decl = ctx.resolve_type(inst.template_definition()); + let args = inst.template_arguments(); + + // Although template definitions should always have + // template parameters, there is a single exception: + // opaque templates. Hence the unwrap_or. + let params = decl.self_template_params(ctx); + + for (arg, param) in args.iter().zip(params.iter()) { + let arg = arg + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .id(); + + let param = param + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .id(); + + used.entry(arg).or_insert_with(|| Some(ItemSet::new())); + used.entry(param).or_insert_with(|| Some(ItemSet::new())); + + dependencies + .entry(arg) + .or_insert_with(Vec::new) + .push(param); + } + } + } + + if cfg!(feature = "testing_only_extra_assertions") { + // Invariant: The `used` map has an entry for every allowlisted + // item, as well as all explicitly blocklisted items that are + // reachable from allowlisted items. + // + // Invariant: the `dependencies` map has an entry for every + // allowlisted item. 
+ // + // (This is so that every item we call `constrain` on is guaranteed + // to have a set of template parameters, and we can allow + // blocklisted templates to use all of their parameters). + for item in allowlisted_items.iter() { + extra_assert!(used.contains_key(item)); + extra_assert!(dependencies.contains_key(item)); + item.trace( + ctx, + &mut |sub_item, _| { + extra_assert!(used.contains_key(&sub_item)); + extra_assert!(dependencies.contains_key(&sub_item)); + }, + &(), + ) + } + } + + UsedTemplateParameters { + ctx, + used, + dependencies, + allowlisted_items, + } + } + + fn initial_worklist(&self) -> Vec { + // The transitive closure of all allowlisted items, including explicitly + // blocklisted items. + self.ctx + .allowlisted_items() + .iter() + .cloned() + .flat_map(|i| { + let mut reachable = vec![i]; + i.trace( + self.ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + // Invariant: all hash map entries' values are `Some` upon entering and + // exiting this method. + extra_assert!(self.used.values().all(|v| v.is_some())); + + // Take the set for this ID out of the hash map while we mutate it based + // on other hash map entries. We *must* put it back into the hash map at + // the end of this method. This allows us to side-step HashMap's lack of + // an analog to slice::split_at_mut. + let mut used_by_this_id = self.take_this_id_usage_set(id); + + trace!("constrain {:?}", id); + trace!(" initially, used set is {:?}", used_by_this_id); + + let original_len = used_by_this_id.len(); + + let item = self.ctx.resolve_item(id); + let ty_kind = item.as_type().map(|ty| ty.kind()); + match ty_kind { + // Named template type parameters trivially use themselves. 
+ Some(&TypeKind::TypeParam) => { + trace!(" named type, trivially uses itself"); + used_by_this_id.insert(id); + } + // Template instantiations only use their template arguments if the + // template definition uses the corresponding template parameter. + Some(TypeKind::TemplateInstantiation(inst)) => { + if self + .allowlisted_items + .contains(&inst.template_definition().into()) + { + self.constrain_instantiation( + id, + &mut used_by_this_id, + inst, + ); + } else { + self.constrain_instantiation_of_blocklisted_template( + id, + &mut used_by_this_id, + inst, + ); + } + } + // Otherwise, add the union of each of its referent item's template + // parameter usage. + _ => self.constrain_join(&mut used_by_this_id, item), + } + + trace!(" finally, used set is {:?}", used_by_this_id); + + let new_len = used_by_this_id.len(); + assert!( + new_len >= original_len, + "This is the property that ensures this function is monotone -- \ + if it doesn't hold, the analysis might never terminate!" + ); + + // Put the set back in the hash map and restore our invariant. 
+ debug_assert!(self.used[&id].is_none()); + self.used.insert(id, Some(used_by_this_id)); + extra_assert!(self.used.values().all(|v| v.is_some())); + + if new_len != original_len { + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + + fn each_depending_on(&self, item: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&item) { + for item in edges { + trace!("enqueue {:?} into worklist", item); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self { + used_templ_params + .used + .into_iter() + .map(|(k, v)| (k, v.unwrap())) + .collect() + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/annotations.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/annotations.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/annotations.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/annotations.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,256 @@ +//! Types and functions related to bindgen annotation comments. +//! +//! Users can add annotations in doc comments to types that they would like to +//! replace other types with, mark as opaque, etc. This module deals with all of +//! that stuff. + +use std::str::FromStr; + +use crate::clang; + +/// What kind of visibility modifer should be used for a struct or field? 
+#[derive(Copy, PartialEq, Eq, Clone, Debug)] +pub enum FieldVisibilityKind { + /// Fields are marked as private, i.e., struct Foo {bar: bool} + Private, + /// Fields are marked as crate public, i.e., struct Foo {pub(crate) bar: bool} + PublicCrate, + /// Fields are marked as public, i.e., struct Foo {pub bar: bool} + Public, +} + +impl FromStr for FieldVisibilityKind { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "private" => Ok(Self::Private), + "crate" => Ok(Self::PublicCrate), + "public" => Ok(Self::Public), + _ => Err(format!("Invalid visibility kind: `{}`", s)), + } + } +} + +impl std::fmt::Display for FieldVisibilityKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + FieldVisibilityKind::Private => "private", + FieldVisibilityKind::PublicCrate => "crate", + FieldVisibilityKind::Public => "public", + }; + + s.fmt(f) + } +} + +impl Default for FieldVisibilityKind { + fn default() -> Self { + FieldVisibilityKind::Public + } +} + +/// What kind of accessor should we provide for a field? +#[derive(Copy, PartialEq, Eq, Clone, Debug)] +pub(crate) enum FieldAccessorKind { + /// No accessor. + None, + /// Plain accessor. + Regular, + /// Unsafe accessor. + Unsafe, + /// Immutable accessor. + Immutable, +} + +/// Annotations for a given item, or a field. +/// +/// You can see the kind of comments that are accepted in the [Doxygen documentation](https://www.doxygen.nl/manual/docblocks.html). +#[derive(Default, Clone, PartialEq, Eq, Debug)] +pub(crate) struct Annotations { + /// Whether this item is marked as opaque. Only applies to types. + opaque: bool, + /// Whether this item should be hidden from the output. Only applies to + /// types, or enum variants. + hide: bool, + /// Whether this type should be replaced by another. The name is a + /// namespace-aware path. + use_instead_of: Option>, + /// Manually disable deriving copy/clone on this type. Only applies to + /// struct or union types. 
+ disallow_copy: bool, + /// Manually disable deriving debug on this type. + disallow_debug: bool, + /// Manually disable deriving/implement default on this type. + disallow_default: bool, + /// Whether to add a `#[must_use]` annotation to this type. + must_use_type: bool, + /// Visibility of struct fields. You can set this on + /// structs (it will apply to all the fields), or individual fields. + visibility_kind: Option, + /// The kind of accessor this field will have. Also can be applied to + /// structs so all the fields inside share it by default. + accessor_kind: Option, + /// Whether this enum variant should be constified. + /// + /// This is controlled by the `constant` attribute, this way: + /// + /// ```cpp + /// enum Foo { + /// Bar = 0, /**<

*/ + /// Baz = 0, + /// }; + /// ``` + /// + /// In that case, bindgen will generate a constant for `Bar` instead of + /// `Baz`. + constify_enum_variant: bool, + /// List of explicit derives for this type. + derives: Vec, +} + +fn parse_accessor(s: &str) -> FieldAccessorKind { + match s { + "false" => FieldAccessorKind::None, + "unsafe" => FieldAccessorKind::Unsafe, + "immutable" => FieldAccessorKind::Immutable, + _ => FieldAccessorKind::Regular, + } +} + +impl Annotations { + /// Construct new annotations for the given cursor and its bindgen comments + /// (if any). + pub(crate) fn new(cursor: &clang::Cursor) -> Option { + let mut anno = Annotations::default(); + let mut matched_one = false; + anno.parse(&cursor.comment(), &mut matched_one); + + if matched_one { + Some(anno) + } else { + None + } + } + + /// Should this type be hidden? + pub(crate) fn hide(&self) -> bool { + self.hide + } + + /// Should this type be opaque? + pub(crate) fn opaque(&self) -> bool { + self.opaque + } + + /// For a given type, indicates the type it should replace. + /// + /// For example, in the following code: + /// + /// ```cpp + /// + /// /**
*/ + /// struct Foo { int x; }; + /// + /// struct Bar { char foo; }; + /// ``` + /// + /// the generated code would look something like: + /// + /// ``` + /// /**
*/ + /// struct Bar { + /// x: ::std::os::raw::c_int, + /// }; + /// ``` + /// + /// That is, code for `Foo` is used to generate `Bar`. + pub(crate) fn use_instead_of(&self) -> Option<&[String]> { + self.use_instead_of.as_deref() + } + + /// The list of derives that have been specified in this annotation. + pub(crate) fn derives(&self) -> &[String] { + &self.derives + } + + /// Should we avoid implementing the `Copy` trait? + pub(crate) fn disallow_copy(&self) -> bool { + self.disallow_copy + } + + /// Should we avoid implementing the `Debug` trait? + pub(crate) fn disallow_debug(&self) -> bool { + self.disallow_debug + } + + /// Should we avoid implementing the `Default` trait? + pub(crate) fn disallow_default(&self) -> bool { + self.disallow_default + } + + /// Should this type get a `#[must_use]` annotation? + pub(crate) fn must_use_type(&self) -> bool { + self.must_use_type + } + + /// What kind of accessors should we provide for this type's fields? + pub(crate) fn visibility_kind(&self) -> Option { + self.visibility_kind + } + + /// What kind of accessors should we provide for this type's fields? 
+ pub(crate) fn accessor_kind(&self) -> Option { + self.accessor_kind + } + + fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) { + use clang_sys::CXComment_HTMLStartTag; + if comment.kind() == CXComment_HTMLStartTag && + comment.get_tag_name() == "div" && + comment + .get_tag_attrs() + .next() + .map_or(false, |attr| attr.name == "rustbindgen") + { + *matched = true; + for attr in comment.get_tag_attrs() { + match attr.name.as_str() { + "opaque" => self.opaque = true, + "hide" => self.hide = true, + "nocopy" => self.disallow_copy = true, + "nodebug" => self.disallow_debug = true, + "nodefault" => self.disallow_default = true, + "mustusetype" => self.must_use_type = true, + "replaces" => { + self.use_instead_of = Some( + attr.value.split("::").map(Into::into).collect(), + ) + } + "derive" => self.derives.push(attr.value), + "private" => { + self.visibility_kind = if attr.value != "false" { + Some(FieldVisibilityKind::Private) + } else { + Some(FieldVisibilityKind::Public) + }; + } + "accessor" => { + self.accessor_kind = Some(parse_accessor(&attr.value)) + } + "constant" => self.constify_enum_variant = true, + _ => {} + } + } + } + + for child in comment.get_children() { + self.parse(&child, matched); + } + } + + /// Returns whether we've parsed a "constant" attribute. + pub(crate) fn constify_enum_variant(&self) -> bool { + self.constify_enum_variant + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comment.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comment.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comment.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comment.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,100 @@ +//! Utilities for manipulating C/C++ comments. + +/// The type of a comment. +#[derive(Debug, PartialEq, Eq)] +enum Kind { + /// A `///` comment, or something of the like. 
+ /// All lines in a comment should start with the same symbol. + SingleLines, + /// A `/**` comment, where each other line can start with `*` and the + /// entire block ends with `*/`. + MultiLine, +} + +/// Preprocesses a C/C++ comment so that it is a valid Rust comment. +pub(crate) fn preprocess(comment: &str) -> String { + match self::kind(comment) { + Some(Kind::SingleLines) => preprocess_single_lines(comment), + Some(Kind::MultiLine) => preprocess_multi_line(comment), + None => comment.to_owned(), + } +} + +/// Gets the kind of the doc comment, if it is one. +fn kind(comment: &str) -> Option { + if comment.starts_with("/*") { + Some(Kind::MultiLine) + } else if comment.starts_with("//") { + Some(Kind::SingleLines) + } else { + None + } +} + +/// Preprocesses multiple single line comments. +/// +/// Handles lines starting with both `//` and `///`. +fn preprocess_single_lines(comment: &str) -> String { + debug_assert!(comment.starts_with("//"), "comment is not single line"); + + let lines: Vec<_> = comment + .lines() + .map(|l| l.trim().trim_start_matches('/')) + .collect(); + lines.join("\n") +} + +fn preprocess_multi_line(comment: &str) -> String { + let comment = comment + .trim_start_matches('/') + .trim_end_matches('/') + .trim_end_matches('*'); + + // Strip any potential `*` characters preceding each line. + let mut lines: Vec<_> = comment + .lines() + .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!')) + .skip_while(|line| line.trim().is_empty()) // Skip the first empty lines. + .collect(); + + // Remove the trailing line corresponding to the `*/`. 
+ if lines.last().map_or(false, |l| l.trim().is_empty()) { + lines.pop(); + } + + lines.join("\n") +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn picks_up_single_and_multi_line_doc_comments() { + assert_eq!(kind("/// hello"), Some(Kind::SingleLines)); + assert_eq!(kind("/** world */"), Some(Kind::MultiLine)); + } + + #[test] + fn processes_single_lines_correctly() { + assert_eq!(preprocess("///"), ""); + assert_eq!(preprocess("/// hello"), " hello"); + assert_eq!(preprocess("// hello"), " hello"); + assert_eq!(preprocess("// hello"), " hello"); + } + + #[test] + fn processes_multi_lines_correctly() { + assert_eq!(preprocess("/**/"), ""); + + assert_eq!( + preprocess("/** hello \n * world \n * foo \n */"), + " hello\n world\n foo" + ); + + assert_eq!( + preprocess("/**\nhello\n*world\n*foo\n*/"), + "hello\nworld\nfoo" + ); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comp.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comp.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comp.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/comp.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,1875 @@ +//! Compound types (unions and structs) in our intermediate representation. 
+ +use super::analysis::Sizedness; +use super::annotations::Annotations; +use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId}; +use super::dot::DotAttributes; +use super::item::{IsOpaque, Item}; +use super::layout::Layout; +use super::template::TemplateParameters; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; +use crate::clang; +use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2}; +use crate::ir::derive::CanDeriveCopy; +use crate::parse::ParseError; +use crate::HashMap; +use crate::NonCopyUnionStyle; +use peeking_take_while::PeekableExt; +use std::cmp; +use std::io; +use std::mem; + +/// The kind of compound type. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum CompKind { + /// A struct. + Struct, + /// A union. + Union, +} + +/// The kind of C++ method. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum MethodKind { + /// A constructor. We represent it as method for convenience, to avoid code + /// duplication. + Constructor, + /// A destructor. + Destructor, + /// A virtual destructor. + VirtualDestructor { + /// Whether it's pure virtual. + pure_virtual: bool, + }, + /// A static method. + Static, + /// A normal method. + Normal, + /// A virtual method. + Virtual { + /// Whether it's pure virtual. + pure_virtual: bool, + }, +} + +impl MethodKind { + /// Is this a destructor method? + pub(crate) fn is_destructor(&self) -> bool { + matches!( + *self, + MethodKind::Destructor | MethodKind::VirtualDestructor { .. } + ) + } + + /// Is this a pure virtual method? + pub(crate) fn is_pure_virtual(&self) -> bool { + match *self { + MethodKind::Virtual { pure_virtual } | + MethodKind::VirtualDestructor { pure_virtual } => pure_virtual, + _ => false, + } + } +} + +/// A struct representing a C++ method, either static, normal, or virtual. +#[derive(Debug)] +pub(crate) struct Method { + kind: MethodKind, + /// The signature of the method. 
Take into account this is not a `Type` + /// item, but a `Function` one. + /// + /// This is tricky and probably this field should be renamed. + signature: FunctionId, + is_const: bool, +} + +impl Method { + /// Construct a new `Method`. + pub(crate) fn new( + kind: MethodKind, + signature: FunctionId, + is_const: bool, + ) -> Self { + Method { + kind, + signature, + is_const, + } + } + + /// What kind of method is this? + pub(crate) fn kind(&self) -> MethodKind { + self.kind + } + + /// Is this a constructor? + pub(crate) fn is_constructor(&self) -> bool { + self.kind == MethodKind::Constructor + } + + /// Is this a virtual method? + pub(crate) fn is_virtual(&self) -> bool { + matches!( + self.kind, + MethodKind::Virtual { .. } | MethodKind::VirtualDestructor { .. } + ) + } + + /// Is this a static method? + pub(crate) fn is_static(&self) -> bool { + self.kind == MethodKind::Static + } + + /// Get the ID for the `Function` signature for this method. + pub(crate) fn signature(&self) -> FunctionId { + self.signature + } + + /// Is this a const qualified method? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } +} + +/// Methods common to the various field types. +pub(crate) trait FieldMethods { + /// Get the name of this field. + fn name(&self) -> Option<&str>; + + /// Get the type of this field. + fn ty(&self) -> TypeId; + + /// Get the comment for this field. + fn comment(&self) -> Option<&str>; + + /// If this is a bitfield, how many bits does it need? + fn bitfield_width(&self) -> Option; + + /// Is this feild declared public? + fn is_public(&self) -> bool; + + /// Get the annotations for this field. + fn annotations(&self) -> &Annotations; + + /// The offset of the field (in bits) + fn offset(&self) -> Option; +} + +/// A contiguous set of logical bitfields that live within the same physical +/// allocation unit. 
See 9.2.4 [class.bit] in the C++ standard and [section +/// 2.4.II.1 in the Itanium C++ +/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types). +#[derive(Debug)] +pub(crate) struct BitfieldUnit { + nth: usize, + layout: Layout, + bitfields: Vec, +} + +impl BitfieldUnit { + /// Get the 1-based index of this bitfield unit within its containing + /// struct. Useful for generating a Rust struct's field name for this unit + /// of bitfields. + pub(crate) fn nth(&self) -> usize { + self.nth + } + + /// Get the layout within which these bitfields reside. + pub(crate) fn layout(&self) -> Layout { + self.layout + } + + /// Get the bitfields within this unit. + pub(crate) fn bitfields(&self) -> &[Bitfield] { + &self.bitfields + } +} + +/// A struct representing a C++ field. +#[derive(Debug)] +pub(crate) enum Field { + /// A normal data member. + DataMember(FieldData), + + /// A physical allocation unit containing many logical bitfields. + Bitfields(BitfieldUnit), +} + +impl Field { + /// Get this field's layout. + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + match *self { + Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout), + Field::DataMember(ref data) => { + ctx.resolve_type(data.ty).layout(ctx) + } + } + } +} + +impl Trace for Field { + type Extra = (); + + fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + match *self { + Field::DataMember(ref data) => { + tracer.visit_kind(data.ty.into(), EdgeKind::Field); + } + Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => { + for bf in bitfields { + tracer.visit_kind(bf.ty().into(), EdgeKind::Field); + } + } + } + } +} + +impl DotAttributes for Field { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + match *self { + Field::DataMember(ref data) => data.dot_attributes(ctx, out), + Field::Bitfields(BitfieldUnit { + layout, + ref bitfields, + .. 
+ }) => { + writeln!( + out, + r#" + bitfield unit + + + + + + + + + "#, + layout.size, layout.align + )?; + for bf in bitfields { + bf.dot_attributes(ctx, out)?; + } + writeln!(out, "
unit.size{}
unit.align{}
") + } + } + } +} + +impl DotAttributes for FieldData { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{}{:?}", + self.name().unwrap_or("(anonymous)"), + self.ty() + ) + } +} + +impl DotAttributes for Bitfield { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{} : {}{:?}", + self.name().unwrap_or("(anonymous)"), + self.width(), + self.ty() + ) + } +} + +/// A logical bitfield within some physical bitfield allocation unit. +#[derive(Debug)] +pub(crate) struct Bitfield { + /// Index of the bit within this bitfield's allocation unit where this + /// bitfield's bits begin. + offset_into_unit: usize, + + /// The field data for this bitfield. + data: FieldData, + + /// Name of the generated Rust getter for this bitfield. + /// + /// Should be assigned before codegen. + getter_name: Option, + + /// Name of the generated Rust setter for this bitfield. + /// + /// Should be assigned before codegen. + setter_name: Option, +} + +impl Bitfield { + /// Construct a new bitfield. + fn new(offset_into_unit: usize, raw: RawField) -> Bitfield { + assert!(raw.bitfield_width().is_some()); + + Bitfield { + offset_into_unit, + data: raw.0, + getter_name: None, + setter_name: None, + } + } + + /// Get the index of the bit within this bitfield's allocation unit where + /// this bitfield begins. + pub(crate) fn offset_into_unit(&self) -> usize { + self.offset_into_unit + } + + /// Get the bit width of this bitfield. + pub(crate) fn width(&self) -> u32 { + self.data.bitfield_width().unwrap() + } + + /// Name of the generated Rust getter for this bitfield. + /// + /// Panics if called before assigning bitfield accessor names or if + /// this bitfield have no name. 
+ pub(crate) fn getter_name(&self) -> &str { + assert!( + self.name().is_some(), + "`Bitfield::getter_name` called on anonymous field" + ); + self.getter_name.as_ref().expect( + "`Bitfield::getter_name` should only be called after\ + assigning bitfield accessor names", + ) + } + + /// Name of the generated Rust setter for this bitfield. + /// + /// Panics if called before assigning bitfield accessor names or if + /// this bitfield have no name. + pub(crate) fn setter_name(&self) -> &str { + assert!( + self.name().is_some(), + "`Bitfield::setter_name` called on anonymous field" + ); + self.setter_name.as_ref().expect( + "`Bitfield::setter_name` should only be called\ + after assigning bitfield accessor names", + ) + } +} + +impl FieldMethods for Bitfield { + fn name(&self) -> Option<&str> { + self.data.name() + } + + fn ty(&self) -> TypeId { + self.data.ty() + } + + fn comment(&self) -> Option<&str> { + self.data.comment() + } + + fn bitfield_width(&self) -> Option { + self.data.bitfield_width() + } + + fn is_public(&self) -> bool { + self.data.is_public() + } + + fn annotations(&self) -> &Annotations { + self.data.annotations() + } + + fn offset(&self) -> Option { + self.data.offset() + } +} + +/// A raw field might be either of a plain data member or a bitfield within a +/// bitfield allocation unit, but we haven't processed it and determined which +/// yet (which would involve allocating it into a bitfield unit if it is a +/// bitfield). +#[derive(Debug)] +struct RawField(FieldData); + +impl RawField { + /// Construct a new `RawField`. 
+ fn new( + name: Option, + ty: TypeId, + comment: Option, + annotations: Option, + bitfield_width: Option, + public: bool, + offset: Option, + ) -> RawField { + RawField(FieldData { + name, + ty, + comment, + annotations: annotations.unwrap_or_default(), + bitfield_width, + public, + offset, + }) + } +} + +impl FieldMethods for RawField { + fn name(&self) -> Option<&str> { + self.0.name() + } + + fn ty(&self) -> TypeId { + self.0.ty() + } + + fn comment(&self) -> Option<&str> { + self.0.comment() + } + + fn bitfield_width(&self) -> Option { + self.0.bitfield_width() + } + + fn is_public(&self) -> bool { + self.0.is_public() + } + + fn annotations(&self) -> &Annotations { + self.0.annotations() + } + + fn offset(&self) -> Option { + self.0.offset() + } +} + +/// Convert the given ordered set of raw fields into a list of either plain data +/// members, and/or bitfield units containing multiple bitfields. +/// +/// If we do not have the layout for a bitfield's type, then we can't reliably +/// compute its allocation unit. In such cases, we return an error. +fn raw_fields_to_fields_and_bitfield_units( + ctx: &BindgenContext, + raw_fields: I, + packed: bool, +) -> Result<(Vec, bool), ()> +where + I: IntoIterator, +{ + let mut raw_fields = raw_fields.into_iter().fuse().peekable(); + let mut fields = vec![]; + let mut bitfield_unit_count = 0; + + loop { + // While we have plain old data members, just keep adding them to our + // resulting fields. We introduce a scope here so that we can use + // `raw_fields` again after the `by_ref` iterator adaptor is dropped. + { + let non_bitfields = raw_fields + .by_ref() + .peeking_take_while(|f| f.bitfield_width().is_none()) + .map(|f| Field::DataMember(f.0)); + fields.extend(non_bitfields); + } + + // Now gather all the consecutive bitfields. Only consecutive bitfields + // may potentially share a bitfield allocation unit with each other in + // the Itanium C++ ABI. 
+ let mut bitfields = raw_fields + .by_ref() + .peeking_take_while(|f| f.bitfield_width().is_some()) + .peekable(); + + if bitfields.peek().is_none() { + break; + } + + bitfields_to_allocation_units( + ctx, + &mut bitfield_unit_count, + &mut fields, + bitfields, + packed, + )?; + } + + assert!( + raw_fields.next().is_none(), + "The above loop should consume all items in `raw_fields`" + ); + + Ok((fields, bitfield_unit_count != 0)) +} + +/// Given a set of contiguous raw bitfields, group and allocate them into +/// (potentially multiple) bitfield units. +fn bitfields_to_allocation_units( + ctx: &BindgenContext, + bitfield_unit_count: &mut usize, + fields: &mut E, + raw_bitfields: I, + packed: bool, +) -> Result<(), ()> +where + E: Extend, + I: IntoIterator, +{ + assert!(ctx.collected_typerefs()); + + // NOTE: What follows is reverse-engineered from LLVM's + // lib/AST/RecordLayoutBuilder.cpp + // + // FIXME(emilio): There are some differences between Microsoft and the + // Itanium ABI, but we'll ignore those and stick to Itanium for now. + // + // Also, we need to handle packed bitfields and stuff. + // + // TODO(emilio): Take into account C++'s wide bitfields, and + // packing, sigh. + + fn flush_allocation_unit( + fields: &mut E, + bitfield_unit_count: &mut usize, + unit_size_in_bits: usize, + unit_align_in_bits: usize, + bitfields: Vec, + packed: bool, + ) where + E: Extend, + { + *bitfield_unit_count += 1; + let align = if packed { + 1 + } else { + bytes_from_bits_pow2(unit_align_in_bits) + }; + let size = align_to(unit_size_in_bits, 8) / 8; + let layout = Layout::new(size, align); + fields.extend(Some(Field::Bitfields(BitfieldUnit { + nth: *bitfield_unit_count, + layout, + bitfields, + }))); + } + + let mut max_align = 0; + let mut unfilled_bits_in_unit = 0; + let mut unit_size_in_bits = 0; + let mut unit_align = 0; + let mut bitfields_in_unit = vec![]; + + // TODO(emilio): Determine this from attributes or pragma ms_struct + // directives. 
Also, perhaps we should check if the target is MSVC? + const is_ms_struct: bool = false; + + for bitfield in raw_bitfields { + let bitfield_width = bitfield.bitfield_width().unwrap() as usize; + let bitfield_layout = + ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?; + let bitfield_size = bitfield_layout.size; + let bitfield_align = bitfield_layout.align; + + let mut offset = unit_size_in_bits; + if !packed { + if is_ms_struct { + if unit_size_in_bits != 0 && + (bitfield_width == 0 || + bitfield_width > unfilled_bits_in_unit) + { + // We've reached the end of this allocation unit, so flush it + // and its bitfields. + unit_size_in_bits = + align_to(unit_size_in_bits, unit_align * 8); + flush_allocation_unit( + fields, + bitfield_unit_count, + unit_size_in_bits, + unit_align, + mem::take(&mut bitfields_in_unit), + packed, + ); + + // Now we're working on a fresh bitfield allocation unit, so reset + // the current unit size and alignment. + offset = 0; + unit_align = 0; + } + } else if offset != 0 && + (bitfield_width == 0 || + (offset & (bitfield_align * 8 - 1)) + bitfield_width > + bitfield_size * 8) + { + offset = align_to(offset, bitfield_align * 8); + } + } + + // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not + // affect the alignment of a structure or union". This makes sense: such + // bit-fields are only used for padding, and we can't perform an + // un-aligned read of something we can't read because we can't even name + // it. + if bitfield.name().is_some() { + max_align = cmp::max(max_align, bitfield_align); + + // NB: The `bitfield_width` here is completely, absolutely + // intentional. Alignment of the allocation unit is based on the + // maximum bitfield width, not (directly) on the bitfields' types' + // alignment. + unit_align = cmp::max(unit_align, bitfield_width); + } + + // Always keep all bitfields around. 
While unnamed bitifields are used + // for padding (and usually not needed hereafter), large unnamed + // bitfields over their types size cause weird allocation size behavior from clang. + // Therefore, all bitfields needed to be kept around in order to check for this + // and make the struct opaque in this case + bitfields_in_unit.push(Bitfield::new(offset, bitfield)); + + unit_size_in_bits = offset + bitfield_width; + + // Compute what the physical unit's final size would be given what we + // have seen so far, and use that to compute how many bits are still + // available in the unit. + let data_size = align_to(unit_size_in_bits, bitfield_align * 8); + unfilled_bits_in_unit = data_size - unit_size_in_bits; + } + + if unit_size_in_bits != 0 { + // Flush the last allocation unit and its bitfields. + flush_allocation_unit( + fields, + bitfield_unit_count, + unit_size_in_bits, + unit_align, + bitfields_in_unit, + packed, + ); + } + + Ok(()) +} + +/// A compound structure's fields are initially raw, and have bitfields that +/// have not been grouped into allocation units. During this time, the fields +/// are mutable and we build them up during parsing. +/// +/// Then, once resolving typerefs is completed, we compute all structs' fields' +/// bitfield allocation units, and they remain frozen and immutable forever +/// after. 
+#[derive(Debug)] +enum CompFields { + Before(Vec), + After { + fields: Vec, + has_bitfield_units: bool, + }, + Error, +} + +impl Default for CompFields { + fn default() -> CompFields { + CompFields::Before(vec![]) + } +} + +impl CompFields { + fn append_raw_field(&mut self, raw: RawField) { + match *self { + CompFields::Before(ref mut raws) => { + raws.push(raw); + } + _ => { + panic!( + "Must not append new fields after computing bitfield allocation units" + ); + } + } + } + + fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) { + let raws = match *self { + CompFields::Before(ref mut raws) => mem::take(raws), + _ => { + panic!("Already computed bitfield units"); + } + }; + + let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed); + + match result { + Ok((fields, has_bitfield_units)) => { + *self = CompFields::After { + fields, + has_bitfield_units, + }; + } + Err(()) => { + *self = CompFields::Error; + } + } + } + + fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) { + let fields = match *self { + CompFields::After { ref mut fields, .. } => fields, + // Nothing to do here. 
+ CompFields::Error => return, + CompFields::Before(_) => { + panic!("Not yet computed bitfield units."); + } + }; + + fn has_method( + methods: &[Method], + ctx: &BindgenContext, + name: &str, + ) -> bool { + methods.iter().any(|method| { + let method_name = ctx.resolve_func(method.signature()).name(); + method_name == name || ctx.rust_mangle(method_name) == name + }) + } + + struct AccessorNamesPair { + getter: String, + setter: String, + } + + let mut accessor_names: HashMap = fields + .iter() + .flat_map(|field| match *field { + Field::Bitfields(ref bu) => &*bu.bitfields, + Field::DataMember(_) => &[], + }) + .filter_map(|bitfield| bitfield.name()) + .map(|bitfield_name| { + let bitfield_name = bitfield_name.to_string(); + let getter = { + let mut getter = + ctx.rust_mangle(&bitfield_name).to_string(); + if has_method(methods, ctx, &getter) { + getter.push_str("_bindgen_bitfield"); + } + getter + }; + let setter = { + let setter = format!("set_{}", bitfield_name); + let mut setter = ctx.rust_mangle(&setter).to_string(); + if has_method(methods, ctx, &setter) { + setter.push_str("_bindgen_bitfield"); + } + setter + }; + (bitfield_name, AccessorNamesPair { getter, setter }) + }) + .collect(); + + let mut anon_field_counter = 0; + for field in fields.iter_mut() { + match *field { + Field::DataMember(FieldData { ref mut name, .. 
}) => { + if name.is_some() { + continue; + } + + anon_field_counter += 1; + *name = Some(format!( + "{}{}", + ctx.options().anon_fields_prefix, + anon_field_counter + )); + } + Field::Bitfields(ref mut bu) => { + for bitfield in &mut bu.bitfields { + if bitfield.name().is_none() { + continue; + } + + if let Some(AccessorNamesPair { getter, setter }) = + accessor_names.remove(bitfield.name().unwrap()) + { + bitfield.getter_name = Some(getter); + bitfield.setter_name = Some(setter); + } + } + } + } + } + } +} + +impl Trace for CompFields { + type Extra = (); + + fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + match *self { + CompFields::Error => {} + CompFields::Before(ref fields) => { + for f in fields { + tracer.visit_kind(f.ty().into(), EdgeKind::Field); + } + } + CompFields::After { ref fields, .. } => { + for f in fields { + f.trace(context, tracer, &()); + } + } + } + } +} + +/// Common data shared across different field types. +#[derive(Clone, Debug)] +pub(crate) struct FieldData { + /// The name of the field, empty if it's an unnamed bitfield width. + name: Option, + + /// The inner type. + ty: TypeId, + + /// The doc comment on the field if any. + comment: Option, + + /// Annotations for this field, or the default. + annotations: Annotations, + + /// If this field is a bitfield, and how many bits does it contain if it is. 
+ bitfield_width: Option, + + /// If the C++ field is declared `public` + public: bool, + + /// The offset of the field (in bits) + offset: Option, +} + +impl FieldMethods for FieldData { + fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + fn ty(&self) -> TypeId { + self.ty + } + + fn comment(&self) -> Option<&str> { + self.comment.as_deref() + } + + fn bitfield_width(&self) -> Option { + self.bitfield_width + } + + fn is_public(&self) -> bool { + self.public + } + + fn annotations(&self) -> &Annotations { + &self.annotations + } + + fn offset(&self) -> Option { + self.offset + } +} + +/// The kind of inheritance a base class is using. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum BaseKind { + /// Normal inheritance, like: + /// + /// ```cpp + /// class A : public B {}; + /// ``` + Normal, + /// Virtual inheritance, like: + /// + /// ```cpp + /// class A: public virtual B {}; + /// ``` + Virtual, +} + +/// A base class. +#[derive(Clone, Debug)] +pub(crate) struct Base { + /// The type of this base class. + pub(crate) ty: TypeId, + /// The kind of inheritance we're doing. + pub(crate) kind: BaseKind, + /// Name of the field in which this base should be stored. + pub(crate) field_name: String, + /// Whether this base is inherited from publically. + pub(crate) is_pub: bool, +} + +impl Base { + /// Whether this base class is inheriting virtually. + pub(crate) fn is_virtual(&self) -> bool { + self.kind == BaseKind::Virtual + } + + /// Whether this base class should have it's own field for storage. + pub(crate) fn requires_storage(&self, ctx: &BindgenContext) -> bool { + // Virtual bases are already taken into account by the vtable + // pointer. + // + // FIXME(emilio): Is this always right? + if self.is_virtual() { + return false; + } + + // NB: We won't include zero-sized types in our base chain because they + // would contribute to our size given the dummy field we insert for + // zero-sized types. 
+ if self.ty.is_zero_sized(ctx) { + return false; + } + + true + } + + /// Whether this base is inherited from publically. + pub(crate) fn is_public(&self) -> bool { + self.is_pub + } +} + +/// A compound type. +/// +/// Either a struct or union, a compound type is built up from the combination +/// of fields which also are associated with their own (potentially compound) +/// type. +#[derive(Debug)] +pub(crate) struct CompInfo { + /// Whether this is a struct or a union. + kind: CompKind, + + /// The members of this struct or union. + fields: CompFields, + + /// The abstract template parameters of this class. Note that these are NOT + /// concrete template arguments, and should always be a + /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see + /// `TypeKind::TemplateInstantiation`. + template_params: Vec, + + /// The method declarations inside this class, if in C++ mode. + methods: Vec, + + /// The different constructors this struct or class contains. + constructors: Vec, + + /// The destructor of this type. The bool represents whether this destructor + /// is virtual. + destructor: Option<(MethodKind, FunctionId)>, + + /// Vector of classes this one inherits from. + base_members: Vec, + + /// The inner types that were declared inside this class, in something like: + /// + /// class Foo { + /// typedef int FooTy; + /// struct Bar { + /// int baz; + /// }; + /// } + /// + /// static Foo::Bar const = {3}; + inner_types: Vec, + + /// Set of static constants declared inside this class. + inner_vars: Vec, + + /// Whether this type should generate an vtable (TODO: Should be able to + /// look at the virtual methods and ditch this field). + has_own_virtual_method: bool, + + /// Whether this type has destructor. + has_destructor: bool, + + /// Whether this type has a base type with more than one member. + /// + /// TODO: We should be able to compute this. 
+ has_nonempty_base: bool, + + /// If this type has a template parameter which is not a type (e.g.: a + /// size_t) + has_non_type_template_params: bool, + + /// Whether this type has a bit field member whose width couldn't be + /// evaluated (e.g. if it depends on a template parameter). We generate an + /// opaque type in this case. + has_unevaluable_bit_field_width: bool, + + /// Whether we saw `__attribute__((packed))` on or within this type. + packed_attr: bool, + + /// Used to know if we've found an opaque attribute that could cause us to + /// generate a type with invalid layout. This is explicitly used to avoid us + /// generating bad alignments when parsing types like max_align_t. + /// + /// It's not clear what the behavior should be here, if generating the item + /// and pray, or behave as an opaque type. + found_unknown_attr: bool, + + /// Used to indicate when a struct has been forward declared. Usually used + /// in headers so that APIs can't modify them directly. + is_forward_declaration: bool, +} + +impl CompInfo { + /// Construct a new compound type. + pub(crate) fn new(kind: CompKind) -> Self { + CompInfo { + kind, + fields: CompFields::default(), + template_params: vec![], + methods: vec![], + constructors: vec![], + destructor: None, + base_members: vec![], + inner_types: vec![], + inner_vars: vec![], + has_own_virtual_method: false, + has_destructor: false, + has_nonempty_base: false, + has_non_type_template_params: false, + has_unevaluable_bit_field_width: false, + packed_attr: false, + found_unknown_attr: false, + is_forward_declaration: false, + } + } + + /// Compute the layout of this type. + /// + /// This is called as a fallback under some circumstances where LLVM doesn't + /// give us the correct layout. + /// + /// If we're a union without known layout, we try to compute it from our + /// members. 
This is not ideal, but clang fails to report the size for these + /// kind of unions, see test/headers/template_union.hpp + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + // We can't do better than clang here, sorry. + if self.kind == CompKind::Struct { + return None; + } + + // By definition, we don't have the right layout information here if + // we're a forward declaration. + if self.is_forward_declaration() { + return None; + } + + // empty union case + if !self.has_fields() { + return None; + } + + let mut max_size = 0; + // Don't allow align(0) + let mut max_align = 1; + self.each_known_field_layout(ctx, |layout| { + max_size = cmp::max(max_size, layout.size); + max_align = cmp::max(max_align, layout.align); + }); + + Some(Layout::new(max_size, max_align)) + } + + /// Get this type's set of fields. + pub(crate) fn fields(&self) -> &[Field] { + match self.fields { + CompFields::Error => &[], + CompFields::After { ref fields, .. } => fields, + CompFields::Before(..) => { + panic!("Should always have computed bitfield units first"); + } + } + } + + fn has_fields(&self) -> bool { + match self.fields { + CompFields::Error => false, + CompFields::After { ref fields, .. } => !fields.is_empty(), + CompFields::Before(ref raw_fields) => !raw_fields.is_empty(), + } + } + + fn each_known_field_layout( + &self, + ctx: &BindgenContext, + mut callback: impl FnMut(Layout), + ) { + match self.fields { + CompFields::Error => {} + CompFields::After { ref fields, .. } => { + for field in fields.iter() { + if let Some(layout) = field.layout(ctx) { + callback(layout); + } + } + } + CompFields::Before(ref raw_fields) => { + for field in raw_fields.iter() { + let field_ty = ctx.resolve_type(field.0.ty); + if let Some(layout) = field_ty.layout(ctx) { + callback(layout); + } + } + } + } + } + + fn has_bitfields(&self) -> bool { + match self.fields { + CompFields::Error => false, + CompFields::After { + has_bitfield_units, .. 
+ } => has_bitfield_units, + CompFields::Before(_) => { + panic!("Should always have computed bitfield units first"); + } + } + } + + /// Returns whether we have a too large bitfield unit, in which case we may + /// not be able to derive some of the things we should be able to normally + /// derive. + pub(crate) fn has_too_large_bitfield_unit(&self) -> bool { + if !self.has_bitfields() { + return false; + } + self.fields().iter().any(|field| match *field { + Field::DataMember(..) => false, + Field::Bitfields(ref unit) => { + unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT + } + }) + } + + /// Does this type have any template parameters that aren't types + /// (e.g. int)? + pub(crate) fn has_non_type_template_params(&self) -> bool { + self.has_non_type_template_params + } + + /// Do we see a virtual function during parsing? + /// Get the has_own_virtual_method boolean. + pub(crate) fn has_own_virtual_method(&self) -> bool { + self.has_own_virtual_method + } + + /// Did we see a destructor when parsing this type? + pub(crate) fn has_own_destructor(&self) -> bool { + self.has_destructor + } + + /// Get this type's set of methods. + pub(crate) fn methods(&self) -> &[Method] { + &self.methods + } + + /// Get this type's set of constructors. + pub(crate) fn constructors(&self) -> &[FunctionId] { + &self.constructors + } + + /// Get this type's destructor. + pub(crate) fn destructor(&self) -> Option<(MethodKind, FunctionId)> { + self.destructor + } + + /// What kind of compound type is this? + pub(crate) fn kind(&self) -> CompKind { + self.kind + } + + /// Is this a union? + pub(crate) fn is_union(&self) -> bool { + self.kind() == CompKind::Union + } + + /// The set of types that this one inherits from. + pub(crate) fn base_members(&self) -> &[Base] { + &self.base_members + } + + /// Construct a new compound type from a Clang type. 
+ pub(crate) fn from_ty( + potential_id: ItemId, + ty: &clang::Type, + location: Option, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + assert!( + ty.template_args().is_none(), + "We handle template instantiations elsewhere" + ); + + let mut cursor = ty.declaration(); + let mut kind = Self::kind_from_cursor(&cursor); + if kind.is_err() { + if let Some(location) = location { + kind = Self::kind_from_cursor(&location); + cursor = location; + } + } + + let kind = kind?; + + debug!("CompInfo::from_ty({:?}, {:?})", kind, cursor); + + let mut ci = CompInfo::new(kind); + ci.is_forward_declaration = + location.map_or(true, |cur| match cur.kind() { + CXCursor_ParmDecl => true, + CXCursor_StructDecl | CXCursor_UnionDecl | + CXCursor_ClassDecl => !cur.is_definition(), + _ => false, + }); + + let mut maybe_anonymous_struct_field = None; + cursor.visit(|cur| { + if cur.kind() != CXCursor_FieldDecl { + if let Some((ty, clang_ty, public, offset)) = + maybe_anonymous_struct_field.take() + { + if cur.kind() == CXCursor_TypedefDecl && + cur.typedef_type().unwrap().canonical_type() == + clang_ty + { + // Typedefs of anonymous structs appear later in the ast + // than the struct itself, that would otherwise be an + // anonymous field. Detect that case here, and do + // nothing. + } else { + let field = RawField::new( + None, ty, None, None, None, public, offset, + ); + ci.fields.append_raw_field(field); + } + } + } + + match cur.kind() { + CXCursor_FieldDecl => { + if let Some((ty, clang_ty, public, offset)) = + maybe_anonymous_struct_field.take() + { + let mut used = false; + cur.visit(|child| { + if child.cur_type() == clang_ty { + used = true; + } + CXChildVisit_Continue + }); + + if !used { + let field = RawField::new( + None, ty, None, None, None, public, offset, + ); + ci.fields.append_raw_field(field); + } + } + + let bit_width = if cur.is_bit_field() { + let width = cur.bit_width(); + + // Make opaque type if the bit width couldn't be + // evaluated. 
+ if width.is_none() { + ci.has_unevaluable_bit_field_width = true; + return CXChildVisit_Break; + } + + width + } else { + None + }; + + let field_type = Item::from_ty_or_ref( + cur.cur_type(), + cur, + Some(potential_id), + ctx, + ); + + let comment = cur.raw_comment(); + let annotations = Annotations::new(&cur); + let name = cur.spelling(); + let is_public = cur.public_accessible(); + let offset = cur.offset_of_field().ok(); + + // Name can be empty if there are bitfields, for example, + // see tests/headers/struct_with_bitfields.h + assert!( + !name.is_empty() || bit_width.is_some(), + "Empty field name?" + ); + + let name = if name.is_empty() { None } else { Some(name) }; + + let field = RawField::new( + name, + field_type, + comment, + annotations, + bit_width, + is_public, + offset, + ); + ci.fields.append_raw_field(field); + + // No we look for things like attributes and stuff. + cur.visit(|cur| { + if cur.kind() == CXCursor_UnexposedAttr { + ci.found_unknown_attr = true; + } + CXChildVisit_Continue + }); + } + CXCursor_UnexposedAttr => { + ci.found_unknown_attr = true; + } + CXCursor_EnumDecl | + CXCursor_TypeAliasDecl | + CXCursor_TypeAliasTemplateDecl | + CXCursor_TypedefDecl | + CXCursor_StructDecl | + CXCursor_UnionDecl | + CXCursor_ClassTemplate | + CXCursor_ClassDecl => { + // We can find non-semantic children here, clang uses a + // StructDecl to note incomplete structs that haven't been + // forward-declared before, see [1]. + // + // Also, clang seems to scope struct definitions inside + // unions, and other named struct definitions inside other + // structs to the whole translation unit. + // + // Let's just assume that if the cursor we've found is a + // definition, it's a valid inner type. 
+ // + // [1]: https://github.com/rust-lang/rust-bindgen/issues/482 + let is_inner_struct = + cur.semantic_parent() == cursor || cur.is_definition(); + if !is_inner_struct { + return CXChildVisit_Continue; + } + + // Even if this is a definition, we may not be the semantic + // parent, see #1281. + let inner = Item::parse(cur, Some(potential_id), ctx) + .expect("Inner ClassDecl"); + + // If we avoided recursion parsing this type (in + // `Item::from_ty_with_id()`), then this might not be a + // valid type ID, so check and gracefully handle this. + if ctx.resolve_item_fallible(inner).is_some() { + let inner = inner.expect_type_id(ctx); + + ci.inner_types.push(inner); + + // A declaration of an union or a struct without name + // could also be an unnamed field, unfortunately. + if cur.is_anonymous() && cur.kind() != CXCursor_EnumDecl + { + let ty = cur.cur_type(); + let public = cur.public_accessible(); + let offset = cur.offset_of_field().ok(); + + maybe_anonymous_struct_field = + Some((inner, ty, public, offset)); + } + } + } + CXCursor_PackedAttr => { + ci.packed_attr = true; + } + CXCursor_TemplateTypeParameter => { + let param = Item::type_param(None, cur, ctx).expect( + "Item::type_param should't fail when pointing \ + at a TemplateTypeParameter", + ); + ci.template_params.push(param); + } + CXCursor_CXXBaseSpecifier => { + let is_virtual_base = cur.is_virtual_base(); + ci.has_own_virtual_method |= is_virtual_base; + + let kind = if is_virtual_base { + BaseKind::Virtual + } else { + BaseKind::Normal + }; + + let field_name = match ci.base_members.len() { + 0 => "_base".into(), + n => format!("_base_{}", n), + }; + let type_id = + Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx); + ci.base_members.push(Base { + ty: type_id, + kind, + field_name, + is_pub: cur.access_specifier() == + clang_sys::CX_CXXPublic, + }); + } + CXCursor_Constructor | CXCursor_Destructor | + CXCursor_CXXMethod => { + let is_virtual = cur.method_is_virtual(); + let is_static = 
cur.method_is_static(); + debug_assert!(!(is_static && is_virtual), "How?"); + + ci.has_destructor |= cur.kind() == CXCursor_Destructor; + ci.has_own_virtual_method |= is_virtual; + + // This used to not be here, but then I tried generating + // stylo bindings with this (without path filters), and + // cried a lot with a method in gfx/Point.h + // (ToUnknownPoint), that somehow was causing the same type + // to be inserted in the map two times. + // + // I couldn't make a reduced test case, but anyway... + // Methods of template functions not only used to be inlined, + // but also instantiated, and we wouldn't be able to call + // them, so just bail out. + if !ci.template_params.is_empty() { + return CXChildVisit_Continue; + } + + // NB: This gets us an owned `Function`, not a + // `FunctionSig`. + let signature = + match Item::parse(cur, Some(potential_id), ctx) { + Ok(item) + if ctx + .resolve_item(item) + .kind() + .is_function() => + { + item + } + _ => return CXChildVisit_Continue, + }; + + let signature = signature.expect_function_id(ctx); + + match cur.kind() { + CXCursor_Constructor => { + ci.constructors.push(signature); + } + CXCursor_Destructor => { + let kind = if is_virtual { + MethodKind::VirtualDestructor { + pure_virtual: cur.method_is_pure_virtual(), + } + } else { + MethodKind::Destructor + }; + ci.destructor = Some((kind, signature)); + } + CXCursor_CXXMethod => { + let is_const = cur.method_is_const(); + let method_kind = if is_static { + MethodKind::Static + } else if is_virtual { + MethodKind::Virtual { + pure_virtual: cur.method_is_pure_virtual(), + } + } else { + MethodKind::Normal + }; + + let method = + Method::new(method_kind, signature, is_const); + + ci.methods.push(method); + } + _ => unreachable!("How can we see this here?"), + } + } + CXCursor_NonTypeTemplateParameter => { + ci.has_non_type_template_params = true; + } + CXCursor_VarDecl => { + let linkage = cur.linkage(); + if linkage != CXLinkage_External && + linkage != 
CXLinkage_UniqueExternal + { + return CXChildVisit_Continue; + } + + let visibility = cur.visibility(); + if visibility != CXVisibility_Default { + return CXChildVisit_Continue; + } + + if let Ok(item) = Item::parse(cur, Some(potential_id), ctx) + { + ci.inner_vars.push(item.as_var_id_unchecked()); + } + } + // Intentionally not handled + CXCursor_CXXAccessSpecifier | + CXCursor_CXXFinalAttr | + CXCursor_FunctionTemplate | + CXCursor_ConversionFunction => {} + _ => { + warn!( + "unhandled comp member `{}` (kind {:?}) in `{}` ({})", + cur.spelling(), + clang::kind_to_str(cur.kind()), + cursor.spelling(), + cur.location() + ); + } + } + CXChildVisit_Continue + }); + + if let Some((ty, _, public, offset)) = maybe_anonymous_struct_field { + let field = + RawField::new(None, ty, None, None, None, public, offset); + ci.fields.append_raw_field(field); + } + + Ok(ci) + } + + fn kind_from_cursor( + cursor: &clang::Cursor, + ) -> Result { + use clang_sys::*; + Ok(match cursor.kind() { + CXCursor_UnionDecl => CompKind::Union, + CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct, + CXCursor_CXXBaseSpecifier | + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_ClassTemplate => match cursor.template_kind() { + CXCursor_UnionDecl => CompKind::Union, + _ => CompKind::Struct, + }, + _ => { + warn!("Unknown kind for comp type: {:?}", cursor); + return Err(ParseError::Continue); + } + }) + } + + /// Get the set of types that were declared within this compound type + /// (e.g. nested class definitions). + pub(crate) fn inner_types(&self) -> &[TypeId] { + &self.inner_types + } + + /// Get the set of static variables declared within this compound type. + pub(crate) fn inner_vars(&self) -> &[VarId] { + &self.inner_vars + } + + /// Have we found a field with an opaque type that could potentially mess up + /// the layout of this compound type? + pub(crate) fn found_unknown_attr(&self) -> bool { + self.found_unknown_attr + } + + /// Is this compound type packed? 
+ pub(crate) fn is_packed( + &self, + ctx: &BindgenContext, + layout: Option<&Layout>, + ) -> bool { + if self.packed_attr { + return true; + } + + // Even though `libclang` doesn't expose `#pragma packed(...)`, we can + // detect it through its effects. + if let Some(parent_layout) = layout { + let mut packed = false; + self.each_known_field_layout(ctx, |layout| { + packed = packed || layout.align > parent_layout.align; + }); + if packed { + info!("Found a struct that was defined within `#pragma packed(...)`"); + return true; + } + + if self.has_own_virtual_method && parent_layout.align == 1 { + return true; + } + } + + false + } + + /// Returns true if compound type has been forward declared + pub(crate) fn is_forward_declaration(&self) -> bool { + self.is_forward_declaration + } + + /// Compute this compound structure's bitfield allocation units. + pub(crate) fn compute_bitfield_units( + &mut self, + ctx: &BindgenContext, + layout: Option<&Layout>, + ) { + let packed = self.is_packed(ctx, layout); + self.fields.compute_bitfield_units(ctx, packed) + } + + /// Assign for each anonymous field a generated name. + pub(crate) fn deanonymize_fields(&mut self, ctx: &BindgenContext) { + self.fields.deanonymize_fields(ctx, &self.methods); + } + + /// Returns whether the current union can be represented as a Rust `union` + /// + /// Requirements: + /// 1. Current RustTarget allows for `untagged_union` + /// 2. Each field can derive `Copy` or we use ManuallyDrop. + /// 3. It's not zero-sized. + /// + /// Second boolean returns whether all fields can be copied (and thus + /// ManuallyDrop is not needed). 
+ pub(crate) fn is_rust_union( + &self, + ctx: &BindgenContext, + layout: Option<&Layout>, + name: &str, + ) -> (bool, bool) { + if !self.is_union() { + return (false, false); + } + + if !ctx.options().untagged_union { + return (false, false); + } + + if self.is_forward_declaration() { + return (false, false); + } + + let union_style = if ctx.options().bindgen_wrapper_union.matches(name) { + NonCopyUnionStyle::BindgenWrapper + } else if ctx.options().manually_drop_union.matches(name) { + NonCopyUnionStyle::ManuallyDrop + } else { + ctx.options().default_non_copy_union_style + }; + + let all_can_copy = self.fields().iter().all(|f| match *f { + Field::DataMember(ref field_data) => { + field_data.ty().can_derive_copy(ctx) + } + Field::Bitfields(_) => true, + }); + + if !all_can_copy && union_style == NonCopyUnionStyle::BindgenWrapper { + return (false, false); + } + + if layout.map_or(false, |l| l.size == 0) { + return (false, false); + } + + (true, all_can_copy) + } +} + +impl DotAttributes for CompInfo { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "CompKind{:?}", self.kind)?; + + if self.has_own_virtual_method { + writeln!(out, "has_vtabletrue")?; + } + + if self.has_destructor { + writeln!(out, "has_destructortrue")?; + } + + if self.has_nonempty_base { + writeln!(out, "has_nonempty_basetrue")?; + } + + if self.has_non_type_template_params { + writeln!( + out, + "has_non_type_template_paramstrue" + )?; + } + + if self.packed_attr { + writeln!(out, "packed_attrtrue")?; + } + + if self.is_forward_declaration { + writeln!( + out, + "is_forward_declarationtrue" + )?; + } + + if !self.fields().is_empty() { + writeln!(out, r#"fields"#)?; + for field in self.fields() { + field.dot_attributes(ctx, out)?; + } + writeln!(out, "
")?; + } + + Ok(()) + } +} + +impl IsOpaque for CompInfo { + type Extra = Option; + + fn is_opaque(&self, ctx: &BindgenContext, layout: &Option) -> bool { + if self.has_non_type_template_params || + self.has_unevaluable_bit_field_width + { + return true; + } + + // When we do not have the layout for a bitfield's type (for example, it + // is a type parameter), then we can't compute bitfield units. We are + // left with no choice but to make the whole struct opaque, or else we + // might generate structs with incorrect sizes and alignments. + if let CompFields::Error = self.fields { + return true; + } + + // Bitfields with a width that is larger than their unit's width have + // some strange things going on, and the best we can do is make the + // whole struct opaque. + if self.fields().iter().any(|f| match *f { + Field::DataMember(_) => false, + Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| { + let bitfield_layout = ctx + .resolve_type(bf.ty()) + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + bf.width() / 8 > bitfield_layout.size as u32 + }), + }) { + return true; + } + + if !ctx.options().rust_features().repr_packed_n { + // If we don't have `#[repr(packed(N)]`, the best we can + // do is make this struct opaque. + // + // See https://github.com/rust-lang/rust-bindgen/issues/537 and + // https://github.com/rust-lang/rust/issues/33158 + if self.is_packed(ctx, layout.as_ref()) && + layout.map_or(false, |l| l.align > 1) + { + warn!("Found a type that is both packed and aligned to greater than \ + 1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \ + are treating it as opaque. 
You may wish to set bindgen's rust target \ + version to 1.33 or later to enable `#[repr(packed(N))]` support."); + return true; + } + } + + false + } +} + +impl TemplateParameters for CompInfo { + fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { + self.template_params.clone() + } +} + +impl Trace for CompInfo { + type Extra = Item; + + fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) + where + T: Tracer, + { + for p in item.all_template_params(context) { + tracer.visit_kind(p.into(), EdgeKind::TemplateParameterDefinition); + } + + for ty in self.inner_types() { + tracer.visit_kind(ty.into(), EdgeKind::InnerType); + } + + for &var in self.inner_vars() { + tracer.visit_kind(var.into(), EdgeKind::InnerVar); + } + + for method in self.methods() { + tracer.visit_kind(method.signature.into(), EdgeKind::Method); + } + + if let Some((_kind, signature)) = self.destructor() { + tracer.visit_kind(signature.into(), EdgeKind::Destructor); + } + + for ctor in self.constructors() { + tracer.visit_kind(ctor.into(), EdgeKind::Constructor); + } + + // Base members and fields are not generated for opaque types (but all + // of the above things are) so stop here. + if item.is_opaque(context, &()) { + return; + } + + for base in self.base_members() { + tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember); + } + + self.fields.trace(context, tracer, &()); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/context.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/context.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/context.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/context.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,2981 @@ +//! Common context that is passed around during parsing and codegen. 
+ +use super::super::time::Timer; +use super::analysis::{ + analyze, as_cannot_derive_set, CannotDerive, DeriveTrait, + HasDestructorAnalysis, HasFloat, HasTypeParameterInArray, + HasVtableAnalysis, HasVtableResult, SizednessAnalysis, SizednessResult, + UsedTemplateParameters, +}; +use super::derive::{ + CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use super::function::Function; +use super::int::IntKind; +use super::item::{IsOpaque, Item, ItemAncestors, ItemSet}; +use super::item_kind::ItemKind; +use super::module::{Module, ModuleKind}; +use super::template::{TemplateInstantiation, TemplateParameters}; +use super::traversal::{self, Edge, ItemTraversal}; +use super::ty::{FloatKind, Type, TypeKind}; +use crate::clang::{self, Cursor}; +use crate::codegen::CodegenError; +use crate::BindgenOptions; +use crate::{Entry, HashMap, HashSet}; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::ToTokens; +use std::borrow::Cow; +use std::cell::{Cell, RefCell}; +use std::collections::{BTreeSet, HashMap as StdHashMap}; +use std::iter::IntoIterator; +use std::mem; + +/// An identifier for some kind of IR item. +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct ItemId(usize); + +/// Declare a newtype around `ItemId` with convesion methods. +macro_rules! item_id_newtype { + ( + $( #[$attr:meta] )* + pub(crate) struct $name:ident(ItemId) + where + $( #[$checked_attr:meta] )* + checked = $checked:ident with $check_method:ident, + $( #[$expected_attr:meta] )* + expected = $expected:ident, + $( #[$unchecked_attr:meta] )* + unchecked = $unchecked:ident; + ) => { + $( #[$attr] )* + #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] + pub(crate) struct $name(ItemId); + + impl $name { + /// Create an `ItemResolver` from this ID. 
+ #[allow(dead_code)] + pub(crate) fn into_resolver(self) -> ItemResolver { + let id: ItemId = self.into(); + id.into() + } + } + + impl ::std::cmp::PartialEq for $name + where + T: Copy + Into + { + fn eq(&self, rhs: &T) -> bool { + let rhs: ItemId = (*rhs).into(); + self.0 == rhs + } + } + + impl From<$name> for ItemId { + fn from(id: $name) -> ItemId { + id.0 + } + } + + impl<'a> From<&'a $name> for ItemId { + fn from(id: &'a $name) -> ItemId { + id.0 + } + } + + #[allow(dead_code)] + impl ItemId { + $( #[$checked_attr] )* + pub(crate) fn $checked(&self, ctx: &BindgenContext) -> Option<$name> { + if ctx.resolve_item(*self).kind().$check_method() { + Some($name(*self)) + } else { + None + } + } + + $( #[$expected_attr] )* + pub(crate) fn $expected(&self, ctx: &BindgenContext) -> $name { + self.$checked(ctx) + .expect(concat!( + stringify!($expected), + " called with ItemId that points to the wrong ItemKind" + )) + } + + $( #[$unchecked_attr] )* + pub(crate) fn $unchecked(&self) -> $name { + $name(*self) + } + } + } +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Type`. + pub(crate) struct TypeId(ItemId) + where + /// Convert this `ItemId` into a `TypeId` if its associated item is a type, + /// otherwise return `None`. + checked = as_type_id with is_type, + + /// Convert this `ItemId` into a `TypeId`. + /// + /// If this `ItemId` does not point to a type, then panic. + expected = expect_type_id, + + /// Convert this `ItemId` into a `TypeId` without actually checking whether + /// this ID actually points to a `Type`. + unchecked = as_type_id_unchecked; +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Module`. + pub(crate) struct ModuleId(ItemId) + where + /// Convert this `ItemId` into a `ModuleId` if its associated item is a + /// module, otherwise return `None`. 
+ checked = as_module_id with is_module, + + /// Convert this `ItemId` into a `ModuleId`. + /// + /// If this `ItemId` does not point to a module, then panic. + expected = expect_module_id, + + /// Convert this `ItemId` into a `ModuleId` without actually checking + /// whether this ID actually points to a `Module`. + unchecked = as_module_id_unchecked; +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Var`. + pub(crate) struct VarId(ItemId) + where + /// Convert this `ItemId` into a `VarId` if its associated item is a var, + /// otherwise return `None`. + checked = as_var_id with is_var, + + /// Convert this `ItemId` into a `VarId`. + /// + /// If this `ItemId` does not point to a var, then panic. + expected = expect_var_id, + + /// Convert this `ItemId` into a `VarId` without actually checking whether + /// this ID actually points to a `Var`. + unchecked = as_var_id_unchecked; +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Function`. + pub(crate) struct FunctionId(ItemId) + where + /// Convert this `ItemId` into a `FunctionId` if its associated item is a function, + /// otherwise return `None`. + checked = as_function_id with is_function, + + /// Convert this `ItemId` into a `FunctionId`. + /// + /// If this `ItemId` does not point to a function, then panic. + expected = expect_function_id, + + /// Convert this `ItemId` into a `FunctionId` without actually checking whether + /// this ID actually points to a `Function`. + unchecked = as_function_id_unchecked; +} + +impl From for usize { + fn from(id: ItemId) -> usize { + id.0 + } +} + +impl ItemId { + /// Get a numeric representation of this ID. 
+ pub(crate) fn as_usize(&self) -> usize { + (*self).into() + } +} + +impl ::std::cmp::PartialEq for ItemId +where + T: Copy + Into, +{ + fn eq(&self, rhs: &T) -> bool { + let rhs: ItemId = (*rhs).into(); + self.0 == rhs.0 + } +} + +impl CanDeriveDebug for T +where + T: Copy + Into, +{ + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_debug && ctx.lookup_can_derive_debug(*self) + } +} + +impl CanDeriveDefault for T +where + T: Copy + Into, +{ + fn can_derive_default(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_default && ctx.lookup_can_derive_default(*self) + } +} + +impl CanDeriveCopy for T +where + T: Copy + Into, +{ + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_copy && ctx.lookup_can_derive_copy(*self) + } +} + +impl CanDeriveHash for T +where + T: Copy + Into, +{ + fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_hash && ctx.lookup_can_derive_hash(*self) + } +} + +impl CanDerivePartialOrd for T +where + T: Copy + Into, +{ + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_partialord && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes + } +} + +impl CanDerivePartialEq for T +where + T: Copy + Into, +{ + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_partialeq && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes + } +} + +impl CanDeriveEq for T +where + T: Copy + Into, +{ + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_eq && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes && + !ctx.lookup_has_float(*self) + } +} + +impl CanDeriveOrd for T +where + T: Copy + Into, +{ + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_ord && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes && + !ctx.lookup_has_float(*self) 
+ } +} + +/// A key used to index a resolved type, so we only process it once. +/// +/// This is almost always a USR string (an unique identifier generated by +/// clang), but it can also be the canonical declaration if the type is unnamed, +/// in which case clang may generate the same USR for multiple nested unnamed +/// types. +#[derive(Eq, PartialEq, Hash, Debug)] +enum TypeKey { + Usr(String), + Declaration(Cursor), +} + +/// A context used during parsing and generation of structs. +#[derive(Debug)] +pub(crate) struct BindgenContext { + /// The map of all the items parsed so far, keyed off ItemId. + items: Vec>, + + /// Clang USR to type map. This is needed to be able to associate types with + /// item ids during parsing. + types: HashMap, + + /// Maps from a cursor to the item ID of the named template type parameter + /// for that cursor. + type_params: HashMap, + + /// A cursor to module map. Similar reason than above. + modules: HashMap, + + /// The root module, this is guaranteed to be an item of kind Module. + root_module: ModuleId, + + /// Current module being traversed. + current_module: ModuleId, + + /// A HashMap keyed on a type definition, and whose value is the parent ID + /// of the declaration. + /// + /// This is used to handle the cases where the semantic and the lexical + /// parents of the cursor differ, like when a nested class is defined + /// outside of the parent class. + semantic_parents: HashMap, + + /// A stack with the current type declarations and types we're parsing. This + /// is needed to avoid infinite recursion when parsing a type like: + /// + /// struct c { struct c* next; }; + /// + /// This means effectively, that a type has a potential ID before knowing if + /// it's a correct type. But that's not important in practice. + /// + /// We could also use the `types` HashMap, but my intention with it is that + /// only valid types and declarations end up there, and this could + /// potentially break that assumption. 
+ currently_parsed_types: Vec, + + /// A map with all the already parsed macro names. This is done to avoid + /// hard errors while parsing duplicated macros, as well to allow macro + /// expression parsing. + /// + /// This needs to be an std::HashMap because the cexpr API requires it. + parsed_macros: StdHashMap, cexpr::expr::EvalResult>, + + /// A set of all the included filenames. + deps: BTreeSet, + + /// The active replacements collected from replaces="xxx" annotations. + replacements: HashMap, ItemId>, + + collected_typerefs: bool, + + in_codegen: bool, + + /// The translation unit for parsing. + translation_unit: clang::TranslationUnit, + + /// Target information that can be useful for some stuff. + target_info: clang::TargetInfo, + + /// The options given by the user via cli or other medium. + options: BindgenOptions, + + /// Whether a bindgen complex was generated + generated_bindgen_complex: Cell, + + /// The set of `ItemId`s that are allowlisted. This the very first thing + /// computed after parsing our IR, and before running any of our analyses. + allowlisted: Option, + + /// Cache for calls to `ParseCallbacks::blocklisted_type_implements_trait` + blocklisted_types_implement_traits: + RefCell>>, + + /// The set of `ItemId`s that are allowlisted for code generation _and_ that + /// we should generate accounting for the codegen options. + /// + /// It's computed right after computing the allowlisted items. + codegen_items: Option, + + /// Map from an item's ID to the set of template parameter items that it + /// uses. See `ir::named` for more details. Always `Some` during the codegen + /// phase. + used_template_parameters: Option>, + + /// The set of `TypeKind::Comp` items found during parsing that need their + /// bitfield allocation units computed. Drained in `compute_bitfield_units`. + need_bitfield_allocation: Vec, + + /// The set of enums that are defined by a pair of `enum` and `typedef`, + /// which is legal in C (but not C++). 
+ /// + /// ```c++ + /// // in either order + /// enum Enum { Variants... }; + /// typedef int16_t Enum; + /// ``` + /// + /// The stored `ItemId` is that of the `TypeKind::Enum`, not of the + /// `TypeKind::Alias`. + /// + /// This is populated when we enter codegen by `compute_enum_typedef_combos` + /// and is always `None` before that and `Some` after. + enum_typedef_combos: Option>, + + /// The set of (`ItemId`s of) types that can't derive debug. + /// + /// This is populated when we enter codegen by `compute_cannot_derive_debug` + /// and is always `None` before that and `Some` after. + cannot_derive_debug: Option>, + + /// The set of (`ItemId`s of) types that can't derive default. + /// + /// This is populated when we enter codegen by `compute_cannot_derive_default` + /// and is always `None` before that and `Some` after. + cannot_derive_default: Option>, + + /// The set of (`ItemId`s of) types that can't derive copy. + /// + /// This is populated when we enter codegen by `compute_cannot_derive_copy` + /// and is always `None` before that and `Some` after. + cannot_derive_copy: Option>, + + /// The set of (`ItemId`s of) types that can't derive hash. + /// + /// This is populated when we enter codegen by `compute_can_derive_hash` + /// and is always `None` before that and `Some` after. + cannot_derive_hash: Option>, + + /// The map why specified `ItemId`s of) types that can't derive hash. + /// + /// This is populated when we enter codegen by + /// `compute_cannot_derive_partialord_partialeq_or_eq` and is always `None` + /// before that and `Some` after. + cannot_derive_partialeq_or_partialord: Option>, + + /// The sizedness of types. + /// + /// This is populated by `compute_sizedness` and is always `None` before + /// that function is invoked and `Some` afterwards. + sizedness: Option>, + + /// The set of (`ItemId's of`) types that has vtable. 
+ /// + /// Populated when we enter codegen by `compute_has_vtable`; always `None` + /// before that and `Some` after. + have_vtable: Option>, + + /// The set of (`ItemId's of`) types that has destructor. + /// + /// Populated when we enter codegen by `compute_has_destructor`; always `None` + /// before that and `Some` after. + have_destructor: Option>, + + /// The set of (`ItemId's of`) types that has array. + /// + /// Populated when we enter codegen by `compute_has_type_param_in_array`; always `None` + /// before that and `Some` after. + has_type_param_in_array: Option>, + + /// The set of (`ItemId's of`) types that has float. + /// + /// Populated when we enter codegen by `compute_has_float`; always `None` + /// before that and `Some` after. + has_float: Option>, +} + +/// A traversal of allowlisted items. +struct AllowlistedItemsTraversal<'ctx> { + ctx: &'ctx BindgenContext, + traversal: ItemTraversal<'ctx, ItemSet, Vec>, +} + +impl<'ctx> Iterator for AllowlistedItemsTraversal<'ctx> { + type Item = ItemId; + + fn next(&mut self) -> Option { + loop { + let id = self.traversal.next()?; + + if self.ctx.resolve_item(id).is_blocklisted(self.ctx) { + continue; + } + + return Some(id); + } + } +} + +impl<'ctx> AllowlistedItemsTraversal<'ctx> { + /// Construct a new allowlisted items traversal. + pub(crate) fn new( + ctx: &'ctx BindgenContext, + roots: R, + predicate: for<'a> fn(&'a BindgenContext, Edge) -> bool, + ) -> Self + where + R: IntoIterator, + { + AllowlistedItemsTraversal { + ctx, + traversal: ItemTraversal::new(ctx, roots, predicate), + } + } +} + +impl BindgenContext { + /// Construct the context for the given `options`. + pub(crate) fn new( + options: BindgenOptions, + input_unsaved_files: &[clang::UnsavedFile], + ) -> Self { + // TODO(emilio): Use the CXTargetInfo here when available. 
+ // + // see: https://reviews.llvm.org/D32389 + let index = clang::Index::new(false, true); + + let parse_options = + clang_sys::CXTranslationUnit_DetailedPreprocessingRecord; + + let translation_unit = { + let _t = + Timer::new("translation_unit").with_output(options.time_phases); + + clang::TranslationUnit::parse( + &index, + "", + &options.clang_args, + input_unsaved_files, + parse_options, + ).expect("libclang error; possible causes include: +- Invalid flag syntax +- Unrecognized flags +- Invalid flag arguments +- File I/O errors +- Host vs. target architecture mismatch +If you encounter an error missing from this list, please file an issue or a PR!") + }; + + let target_info = clang::TargetInfo::new(&translation_unit); + let root_module = Self::build_root_module(ItemId(0)); + let root_module_id = root_module.id().as_module_id_unchecked(); + + // depfiles need to include the explicitly listed headers too + let deps = options.input_headers.iter().cloned().collect(); + + BindgenContext { + items: vec![Some(root_module)], + deps, + types: Default::default(), + type_params: Default::default(), + modules: Default::default(), + root_module: root_module_id, + current_module: root_module_id, + semantic_parents: Default::default(), + currently_parsed_types: vec![], + parsed_macros: Default::default(), + replacements: Default::default(), + collected_typerefs: false, + in_codegen: false, + translation_unit, + target_info, + options, + generated_bindgen_complex: Cell::new(false), + allowlisted: None, + blocklisted_types_implement_traits: Default::default(), + codegen_items: None, + used_template_parameters: None, + need_bitfield_allocation: Default::default(), + enum_typedef_combos: None, + cannot_derive_debug: None, + cannot_derive_default: None, + cannot_derive_copy: None, + cannot_derive_hash: None, + cannot_derive_partialeq_or_partialord: None, + sizedness: None, + have_vtable: None, + have_destructor: None, + has_type_param_in_array: None, + has_float: None, + } + } 
+ + /// Returns `true` if the target architecture is wasm32 + pub(crate) fn is_target_wasm32(&self) -> bool { + self.target_info.triple.starts_with("wasm32-") + } + + /// Creates a timer for the current bindgen phase. If time_phases is `true`, + /// the timer will print to stderr when it is dropped, otherwise it will do + /// nothing. + pub(crate) fn timer<'a>(&self, name: &'a str) -> Timer<'a> { + Timer::new(name).with_output(self.options.time_phases) + } + + /// Returns the pointer width to use for the target for the current + /// translation. + pub(crate) fn target_pointer_size(&self) -> usize { + self.target_info.pointer_width / 8 + } + + /// Get the stack of partially parsed types that we are in the middle of + /// parsing. + pub(crate) fn currently_parsed_types(&self) -> &[PartialType] { + &self.currently_parsed_types[..] + } + + /// Begin parsing the given partial type, and push it onto the + /// `currently_parsed_types` stack so that we won't infinite recurse if we + /// run into a reference to it while parsing it. + pub(crate) fn begin_parsing(&mut self, partial_ty: PartialType) { + self.currently_parsed_types.push(partial_ty); + } + + /// Finish parsing the current partial type, pop it off the + /// `currently_parsed_types` stack, and return it. + pub(crate) fn finish_parsing(&mut self) -> PartialType { + self.currently_parsed_types.pop().expect( + "should have been parsing a type, if we finished parsing a type", + ) + } + + /// Add another path to the set of included files. + pub(crate) fn include_file(&mut self, filename: String) { + for cb in &self.options().parse_callbacks { + cb.include_file(&filename); + } + self.deps.insert(filename); + } + + /// Get any included files. + pub(crate) fn deps(&self) -> &BTreeSet { + &self.deps + } + + /// Define a new item. + /// + /// This inserts it into the internal items set, and its type into the + /// internal types set. 
+ pub(crate) fn add_item( + &mut self, + item: Item, + declaration: Option, + location: Option, + ) { + debug!( + "BindgenContext::add_item({:?}, declaration: {:?}, loc: {:?}", + item, declaration, location + ); + debug_assert!( + declaration.is_some() || + !item.kind().is_type() || + item.kind().expect_type().is_builtin_or_type_param() || + item.kind().expect_type().is_opaque(self, &item) || + item.kind().expect_type().is_unresolved_ref(), + "Adding a type without declaration?" + ); + + let id = item.id(); + let is_type = item.kind().is_type(); + let is_unnamed = is_type && item.expect_type().name().is_none(); + let is_template_instantiation = + is_type && item.expect_type().is_template_instantiation(); + + if item.id() != self.root_module { + self.add_item_to_module(&item); + } + + if is_type && item.expect_type().is_comp() { + self.need_bitfield_allocation.push(id); + } + + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!( + old_item.is_none(), + "should not have already associated an item with the given id" + ); + + // Unnamed items can have an USR, but they can't be referenced from + // other sites explicitly and the USR can match if the unnamed items are + // nested, so don't bother tracking them. + if !is_type || is_template_instantiation { + return; + } + if let Some(mut declaration) = declaration { + if !declaration.is_valid() { + if let Some(location) = location { + if location.is_template_like() { + declaration = location; + } + } + } + declaration = declaration.canonical(); + if !declaration.is_valid() { + // This could happen, for example, with types like `int*` or + // similar. + // + // Fortunately, we don't care about those types being + // duplicated, so we can just ignore them. 
+ debug!( + "Invalid declaration {:?} found for type {:?}", + declaration, + self.resolve_item_fallible(id) + .unwrap() + .kind() + .expect_type() + ); + return; + } + + let key = if is_unnamed { + TypeKey::Declaration(declaration) + } else if let Some(usr) = declaration.usr() { + TypeKey::Usr(usr) + } else { + warn!( + "Valid declaration with no USR: {:?}, {:?}", + declaration, location + ); + TypeKey::Declaration(declaration) + }; + + let old = self.types.insert(key, id.as_type_id_unchecked()); + debug_assert_eq!(old, None); + } + } + + /// Ensure that every item (other than the root module) is in a module's + /// children list. This is to make sure that every allowlisted item get's + /// codegen'd, even if its parent is not allowlisted. See issue #769 for + /// details. + fn add_item_to_module(&mut self, item: &Item) { + assert!(item.id() != self.root_module); + assert!(self.resolve_item_fallible(item.id()).is_none()); + + if let Some(ref mut parent) = self.items[item.parent_id().0] { + if let Some(module) = parent.as_module_mut() { + debug!( + "add_item_to_module: adding {:?} as child of parent module {:?}", + item.id(), + item.parent_id() + ); + + module.children_mut().insert(item.id()); + return; + } + } + + debug!( + "add_item_to_module: adding {:?} as child of current module {:?}", + item.id(), + self.current_module + ); + + self.items[(self.current_module.0).0] + .as_mut() + .expect("Should always have an item for self.current_module") + .as_module_mut() + .expect("self.current_module should always be a module") + .children_mut() + .insert(item.id()); + } + + /// Add a new named template type parameter to this context's item set. 
+ pub(crate) fn add_type_param( + &mut self, + item: Item, + definition: clang::Cursor, + ) { + debug!( + "BindgenContext::add_type_param: item = {:?}; definition = {:?}", + item, definition + ); + + assert!( + item.expect_type().is_type_param(), + "Should directly be a named type, not a resolved reference or anything" + ); + assert_eq!( + definition.kind(), + clang_sys::CXCursor_TemplateTypeParameter + ); + + self.add_item_to_module(&item); + + let id = item.id(); + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!( + old_item.is_none(), + "should not have already associated an item with the given id" + ); + + let old_named_ty = self + .type_params + .insert(definition, id.as_type_id_unchecked()); + assert!( + old_named_ty.is_none(), + "should not have already associated a named type with this id" + ); + } + + /// Get the named type defined at the given cursor location, if we've + /// already added one. + pub(crate) fn get_type_param( + &self, + definition: &clang::Cursor, + ) -> Option { + assert_eq!( + definition.kind(), + clang_sys::CXCursor_TemplateTypeParameter + ); + self.type_params.get(definition).cloned() + } + + // TODO: Move all this syntax crap to other part of the code. + + /// Mangles a name so it doesn't conflict with any keyword. 
+ #[rustfmt::skip] + pub(crate) fn rust_mangle<'a>(&self, name: &'a str) -> Cow<'a, str> { + if name.contains('@') || + name.contains('?') || + name.contains('$') || + matches!( + name, + "abstract" | "alignof" | "as" | "async" | "await" | "become" | + "box" | "break" | "const" | "continue" | "crate" | "do" | + "dyn" | "else" | "enum" | "extern" | "false" | "final" | + "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | + "macro" | "match" | "mod" | "move" | "mut" | "offsetof" | + "override" | "priv" | "proc" | "pub" | "pure" | "ref" | + "return" | "Self" | "self" | "sizeof" | "static" | + "struct" | "super" | "trait" | "true" | "try" | "type" | "typeof" | + "unsafe" | "unsized" | "use" | "virtual" | "where" | + "while" | "yield" | "str" | "bool" | "f32" | "f64" | + "usize" | "isize" | "u128" | "i128" | "u64" | "i64" | + "u32" | "i32" | "u16" | "i16" | "u8" | "i8" | "_" + ) + { + let mut s = name.to_owned(); + s = s.replace('@', "_"); + s = s.replace('?', "_"); + s = s.replace('$', "_"); + s.push('_'); + return Cow::Owned(s); + } + Cow::Borrowed(name) + } + + /// Returns a mangled name as a rust identifier. + pub(crate) fn rust_ident(&self, name: S) -> Ident + where + S: AsRef, + { + self.rust_ident_raw(self.rust_mangle(name.as_ref())) + } + + /// Returns a mangled name as a rust identifier. + pub(crate) fn rust_ident_raw(&self, name: T) -> Ident + where + T: AsRef, + { + Ident::new(name.as_ref(), Span::call_site()) + } + + /// Iterate over all items that have been defined. + pub(crate) fn items(&self) -> impl Iterator { + self.items.iter().enumerate().filter_map(|(index, item)| { + let item = item.as_ref()?; + Some((ItemId(index), item)) + }) + } + + /// Have we collected all unresolved type references yet? + pub(crate) fn collected_typerefs(&self) -> bool { + self.collected_typerefs + } + + /// Gather all the unresolved type references. 
+ fn collect_typerefs( + &mut self, + ) -> Vec<(ItemId, clang::Type, clang::Cursor, Option)> { + debug_assert!(!self.collected_typerefs); + self.collected_typerefs = true; + let mut typerefs = vec![]; + + for (id, item) in self.items() { + let kind = item.kind(); + let ty = match kind.as_type() { + Some(ty) => ty, + None => continue, + }; + + if let TypeKind::UnresolvedTypeRef(ref ty, loc, parent_id) = + *ty.kind() + { + typerefs.push((id, *ty, loc, parent_id)); + }; + } + typerefs + } + + /// Collect all of our unresolved type references and resolve them. + fn resolve_typerefs(&mut self) { + let _t = self.timer("resolve_typerefs"); + + let typerefs = self.collect_typerefs(); + + for (id, ty, loc, parent_id) in typerefs { + let _resolved = + { + let resolved = Item::from_ty(&ty, loc, parent_id, self) + .unwrap_or_else(|_| { + warn!("Could not resolve type reference, falling back \ + to opaque blob"); + Item::new_opaque_type(self.next_item_id(), &ty, self) + }); + + let item = self.items[id.0].as_mut().unwrap(); + *item.kind_mut().as_type_mut().unwrap().kind_mut() = + TypeKind::ResolvedTypeRef(resolved); + resolved + }; + + // Something in the STL is trolling me. I don't need this assertion + // right now, but worth investigating properly once this lands. + // + // debug_assert!(self.items.get(&resolved).is_some(), "How?"); + // + // if let Some(parent_id) = parent_id { + // assert_eq!(self.items[&resolved].parent_id(), parent_id); + // } + } + } + + /// Temporarily loan `Item` with the given `ItemId`. This provides means to + /// mutably borrow `Item` while having a reference to `BindgenContext`. + /// + /// `Item` with the given `ItemId` is removed from the context, given + /// closure is executed and then `Item` is placed back. + /// + /// # Panics + /// + /// Panics if attempt to resolve given `ItemId` inside the given + /// closure is made. 
+ fn with_loaned_item(&mut self, id: ItemId, f: F) -> T + where + F: (FnOnce(&BindgenContext, &mut Item) -> T), + { + let mut item = self.items[id.0].take().unwrap(); + + let result = f(self, &mut item); + + let existing = mem::replace(&mut self.items[id.0], Some(item)); + assert!(existing.is_none()); + + result + } + + /// Compute the bitfield allocation units for all `TypeKind::Comp` items we + /// parsed. + fn compute_bitfield_units(&mut self) { + let _t = self.timer("compute_bitfield_units"); + + assert!(self.collected_typerefs()); + + let need_bitfield_allocation = + mem::take(&mut self.need_bitfield_allocation); + for id in need_bitfield_allocation { + self.with_loaned_item(id, |ctx, item| { + let ty = item.kind_mut().as_type_mut().unwrap(); + let layout = ty.layout(ctx); + ty.as_comp_mut() + .unwrap() + .compute_bitfield_units(ctx, layout.as_ref()); + }); + } + } + + /// Assign a new generated name for each anonymous field. + fn deanonymize_fields(&mut self) { + let _t = self.timer("deanonymize_fields"); + + let comp_item_ids: Vec = self + .items() + .filter_map(|(id, item)| { + if item.kind().as_type()?.is_comp() { + return Some(id); + } + None + }) + .collect(); + + for id in comp_item_ids { + self.with_loaned_item(id, |ctx, item| { + item.kind_mut() + .as_type_mut() + .unwrap() + .as_comp_mut() + .unwrap() + .deanonymize_fields(ctx); + }); + } + } + + /// Iterate over all items and replace any item that has been named in a + /// `replaces="SomeType"` annotation with the replacement type. + fn process_replacements(&mut self) { + let _t = self.timer("process_replacements"); + if self.replacements.is_empty() { + debug!("No replacements to process"); + return; + } + + // FIXME: This is linear, but the replaces="xxx" annotation was already + // there, and for better or worse it's useful, sigh... + // + // We leverage the ResolvedTypeRef thing, though, which is cool :P. 
+ + let mut replacements = vec![]; + + for (id, item) in self.items() { + if item.annotations().use_instead_of().is_some() { + continue; + } + + // Calls to `canonical_name` are expensive, so eagerly filter out + // items that cannot be replaced. + let ty = match item.kind().as_type() { + Some(ty) => ty, + None => continue, + }; + + match *ty.kind() { + TypeKind::Comp(..) | + TypeKind::TemplateAlias(..) | + TypeKind::Enum(..) | + TypeKind::Alias(..) => {} + _ => continue, + } + + let path = item.path_for_allowlisting(self); + let replacement = self.replacements.get(&path[1..]); + + if let Some(replacement) = replacement { + if *replacement != id { + // We set this just after parsing the annotation. It's + // very unlikely, but this can happen. + if self.resolve_item_fallible(*replacement).is_some() { + replacements.push(( + id.expect_type_id(self), + replacement.expect_type_id(self), + )); + } + } + } + } + + for (id, replacement_id) in replacements { + debug!("Replacing {:?} with {:?}", id, replacement_id); + let new_parent = { + let item_id: ItemId = id.into(); + let item = self.items[item_id.0].as_mut().unwrap(); + *item.kind_mut().as_type_mut().unwrap().kind_mut() = + TypeKind::ResolvedTypeRef(replacement_id); + item.parent_id() + }; + + // Relocate the replacement item from where it was declared, to + // where the thing it is replacing was declared. + // + // First, we'll make sure that its parent ID is correct. + + let old_parent = self.resolve_item(replacement_id).parent_id(); + if new_parent == old_parent { + // Same parent and therefore also same containing + // module. Nothing to do here. + continue; + } + + let replacement_item_id: ItemId = replacement_id.into(); + self.items[replacement_item_id.0] + .as_mut() + .unwrap() + .set_parent_for_replacement(new_parent); + + // Second, make sure that it is in the correct module's children + // set. 
+ + let old_module = { + let immut_self = &*self; + old_parent + .ancestors(immut_self) + .chain(Some(immut_self.root_module.into())) + .find(|id| { + let item = immut_self.resolve_item(*id); + item.as_module().map_or(false, |m| { + m.children().contains(&replacement_id.into()) + }) + }) + }; + let old_module = old_module + .expect("Every replacement item should be in a module"); + + let new_module = { + let immut_self = &*self; + new_parent + .ancestors(immut_self) + .find(|id| immut_self.resolve_item(*id).is_module()) + }; + let new_module = + new_module.unwrap_or_else(|| self.root_module.into()); + + if new_module == old_module { + // Already in the correct module. + continue; + } + + self.items[old_module.0] + .as_mut() + .unwrap() + .as_module_mut() + .unwrap() + .children_mut() + .remove(&replacement_id.into()); + + self.items[new_module.0] + .as_mut() + .unwrap() + .as_module_mut() + .unwrap() + .children_mut() + .insert(replacement_id.into()); + } + } + + /// Enter the code generation phase, invoke the given callback `cb`, and + /// leave the code generation phase. + pub(crate) fn gen( + mut self, + cb: F, + ) -> Result<(Out, BindgenOptions), CodegenError> + where + F: FnOnce(&Self) -> Result, + { + self.in_codegen = true; + + self.resolve_typerefs(); + self.compute_bitfield_units(); + self.process_replacements(); + + self.deanonymize_fields(); + + self.assert_no_dangling_references(); + + // Compute the allowlisted set after processing replacements and + // resolving type refs, as those are the final mutations of the IR + // graph, and their completion means that the IR graph is now frozen. + self.compute_allowlisted_and_codegen_items(); + + // Make sure to do this after processing replacements, since that messes + // with the parentage and module children, and we want to assert that it + // messes with them correctly. 
+ self.assert_every_item_in_a_module(); + + self.compute_has_vtable(); + self.compute_sizedness(); + self.compute_has_destructor(); + self.find_used_template_parameters(); + self.compute_enum_typedef_combos(); + self.compute_cannot_derive_debug(); + self.compute_cannot_derive_default(); + self.compute_cannot_derive_copy(); + self.compute_has_type_param_in_array(); + self.compute_has_float(); + self.compute_cannot_derive_hash(); + self.compute_cannot_derive_partialord_partialeq_or_eq(); + + let ret = cb(&self)?; + Ok((ret, self.options)) + } + + /// When the `testing_only_extra_assertions` feature is enabled, this + /// function walks the IR graph and asserts that we do not have any edges + /// referencing an ItemId for which we do not have an associated IR item. + fn assert_no_dangling_references(&self) { + if cfg!(feature = "testing_only_extra_assertions") { + for _ in self.assert_no_dangling_item_traversal() { + // The iterator's next method does the asserting for us. + } + } + } + + fn assert_no_dangling_item_traversal( + &self, + ) -> traversal::AssertNoDanglingItemsTraversal { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + + let roots = self.items().map(|(id, _)| id); + traversal::AssertNoDanglingItemsTraversal::new( + self, + roots, + traversal::all_edges, + ) + } + + /// When the `testing_only_extra_assertions` feature is enabled, walk over + /// every item and ensure that it is in the children set of one of its + /// module ancestors. 
+ fn assert_every_item_in_a_module(&self) { + if cfg!(feature = "testing_only_extra_assertions") { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + + for (id, _item) in self.items() { + if id == self.root_module { + continue; + } + + assert!( + { + let id = id + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self) + .id(); + id.ancestors(self) + .chain(Some(self.root_module.into())) + .any(|ancestor| { + debug!( + "Checking if {:?} is a child of {:?}", + id, ancestor + ); + self.resolve_item(ancestor) + .as_module() + .map_or(false, |m| { + m.children().contains(&id) + }) + }) + }, + "{:?} should be in some ancestor module's children set", + id + ); + } + } + } + + /// Compute for every type whether it is sized or not, and whether it is + /// sized or not as a base class. + fn compute_sizedness(&mut self) { + let _t = self.timer("compute_sizedness"); + assert!(self.sizedness.is_none()); + self.sizedness = Some(analyze::(self)); + } + + /// Look up whether the type with the given ID is sized or not. + pub(crate) fn lookup_sizedness(&self, id: TypeId) -> SizednessResult { + assert!( + self.in_codegen_phase(), + "We only compute sizedness after we've entered codegen" + ); + + self.sizedness + .as_ref() + .unwrap() + .get(&id) + .cloned() + .unwrap_or(SizednessResult::ZeroSized) + } + + /// Compute whether the type has vtable. + fn compute_has_vtable(&mut self) { + let _t = self.timer("compute_has_vtable"); + assert!(self.have_vtable.is_none()); + self.have_vtable = Some(analyze::(self)); + } + + /// Look up whether the item with `id` has vtable or not. + pub(crate) fn lookup_has_vtable(&self, id: TypeId) -> HasVtableResult { + assert!( + self.in_codegen_phase(), + "We only compute vtables when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` has a + // vtable or not. 
+ self.have_vtable + .as_ref() + .unwrap() + .get(&id.into()) + .cloned() + .unwrap_or(HasVtableResult::No) + } + + /// Compute whether the type has a destructor. + fn compute_has_destructor(&mut self) { + let _t = self.timer("compute_has_destructor"); + assert!(self.have_destructor.is_none()); + self.have_destructor = Some(analyze::(self)); + } + + /// Look up whether the item with `id` has a destructor. + pub(crate) fn lookup_has_destructor(&self, id: TypeId) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute destructors when we enter codegen" + ); + + self.have_destructor.as_ref().unwrap().contains(&id.into()) + } + + fn find_used_template_parameters(&mut self) { + let _t = self.timer("find_used_template_parameters"); + if self.options.allowlist_recursively { + let used_params = analyze::(self); + self.used_template_parameters = Some(used_params); + } else { + // If you aren't recursively allowlisting, then we can't really make + // any sense of template parameter usage, and you're on your own. + let mut used_params = HashMap::default(); + for &id in self.allowlisted_items() { + used_params.entry(id).or_insert_with(|| { + id.self_template_params(self) + .into_iter() + .map(|p| p.into()) + .collect() + }); + } + self.used_template_parameters = Some(used_params); + } + } + + /// Return `true` if `item` uses the given `template_param`, `false` + /// otherwise. + /// + /// This method may only be called during the codegen phase, because the + /// template usage information is only computed as we enter the codegen + /// phase. + /// + /// If the item is blocklisted, then we say that it always uses the template + /// parameter. This is a little subtle. The template parameter usage + /// analysis only considers allowlisted items, and if any blocklisted item + /// shows up in the generated bindings, it is the user's responsibility to + /// manually provide a definition for them. 
To give them the most + /// flexibility when doing that, we assume that they use every template + /// parameter and always pass template arguments through in instantiations. + pub(crate) fn uses_template_parameter( + &self, + item: ItemId, + template_param: TypeId, + ) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute template parameter usage as we enter codegen" + ); + + if self.resolve_item(item).is_blocklisted(self) { + return true; + } + + let template_param = template_param + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self) + .id(); + + self.used_template_parameters + .as_ref() + .expect("should have found template parameter usage if we're in codegen") + .get(&item) + .map_or(false, |items_used_params| items_used_params.contains(&template_param)) + } + + /// Return `true` if `item` uses any unbound, generic template parameters, + /// `false` otherwise. + /// + /// Has the same restrictions that `uses_template_parameter` has. + pub(crate) fn uses_any_template_parameters(&self, item: ItemId) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute template parameter usage as we enter codegen" + ); + + self.used_template_parameters + .as_ref() + .expect( + "should have template parameter usage info in codegen phase", + ) + .get(&item) + .map_or(false, |used| !used.is_empty()) + } + + // This deserves a comment. Builtin types don't get a valid declaration, so + // we can't add it to the cursor->type map. + // + // That being said, they're not generated anyway, and are few, so the + // duplication and special-casing is fine. + // + // If at some point we care about the memory here, probably a map TypeKind + // -> builtin type ItemId would be the best to improve that. 
+ fn add_builtin_item(&mut self, item: Item) { + debug!("add_builtin_item: item = {:?}", item); + debug_assert!(item.kind().is_type()); + self.add_item_to_module(&item); + let id = item.id(); + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!(old_item.is_none(), "Inserted type twice?"); + } + + fn build_root_module(id: ItemId) -> Item { + let module = Module::new(Some("root".into()), ModuleKind::Normal); + Item::new(id, None, None, id, ItemKind::Module(module), None) + } + + /// Get the root module. + pub(crate) fn root_module(&self) -> ModuleId { + self.root_module + } + + /// Resolve a type with the given ID. + /// + /// Panics if there is no item for the given `TypeId` or if the resolved + /// item is not a `Type`. + pub(crate) fn resolve_type(&self, type_id: TypeId) -> &Type { + self.resolve_item(type_id).kind().expect_type() + } + + /// Resolve a function with the given ID. + /// + /// Panics if there is no item for the given `FunctionId` or if the resolved + /// item is not a `Function`. + pub(crate) fn resolve_func(&self, func_id: FunctionId) -> &Function { + self.resolve_item(func_id).kind().expect_function() + } + + /// Resolve the given `ItemId` as a type, or `None` if there is no item with + /// the given ID. + /// + /// Panics if the ID resolves to an item that is not a type. + pub(crate) fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> { + self.resolve_item_fallible(type_id) + .map(|t| t.kind().expect_type()) + } + + /// Resolve the given `ItemId` into an `Item`, or `None` if no such item + /// exists. + pub(crate) fn resolve_item_fallible>( + &self, + id: Id, + ) -> Option<&Item> { + self.items.get(id.into().0)?.as_ref() + } + + /// Resolve the given `ItemId` into an `Item`. + /// + /// Panics if the given ID does not resolve to any item. 
+ pub(crate) fn resolve_item>(&self, item_id: Id) -> &Item { + let item_id = item_id.into(); + match self.resolve_item_fallible(item_id) { + Some(item) => item, + None => panic!("Not an item: {:?}", item_id), + } + } + + /// Get the current module. + pub(crate) fn current_module(&self) -> ModuleId { + self.current_module + } + + /// Add a semantic parent for a given type definition. + /// + /// We do this from the type declaration, in order to be able to find the + /// correct type definition afterwards. + /// + /// TODO(emilio): We could consider doing this only when + /// declaration.lexical_parent() != definition.lexical_parent(), but it's + /// not sure it's worth it. + pub(crate) fn add_semantic_parent( + &mut self, + definition: clang::Cursor, + parent_id: ItemId, + ) { + self.semantic_parents.insert(definition, parent_id); + } + + /// Returns a known semantic parent for a given definition. + pub(crate) fn known_semantic_parent( + &self, + definition: clang::Cursor, + ) -> Option { + self.semantic_parents.get(&definition).cloned() + } + + /// Given a cursor pointing to the location of a template instantiation, + /// return a tuple of the form `(declaration_cursor, declaration_id, + /// num_expected_template_args)`. + /// + /// Note that `declaration_id` is not guaranteed to be in the context's item + /// set! It is possible that it is a partial type that we are still in the + /// middle of parsing. 
+ fn get_declaration_info_for_template_instantiation( + &self, + instantiation: &Cursor, + ) -> Option<(Cursor, ItemId, usize)> { + instantiation + .cur_type() + .canonical_declaration(Some(instantiation)) + .and_then(|canon_decl| { + self.get_resolved_type(&canon_decl).and_then( + |template_decl_id| { + let num_params = + template_decl_id.num_self_template_params(self); + if num_params == 0 { + None + } else { + Some(( + *canon_decl.cursor(), + template_decl_id.into(), + num_params, + )) + } + }, + ) + }) + .or_else(|| { + // If we haven't already parsed the declaration of + // the template being instantiated, then it *must* + // be on the stack of types we are currently + // parsing. If it wasn't then clang would have + // already errored out before we started + // constructing our IR because you can't instantiate + // a template until it is fully defined. + instantiation + .referenced() + .and_then(|referenced| { + self.currently_parsed_types() + .iter() + .find(|partial_ty| *partial_ty.decl() == referenced) + .cloned() + }) + .and_then(|template_decl| { + let num_template_params = + template_decl.num_self_template_params(self); + if num_template_params == 0 { + None + } else { + Some(( + *template_decl.decl(), + template_decl.id(), + num_template_params, + )) + } + }) + }) + } + + /// Parse a template instantiation, eg `Foo`. + /// + /// This is surprisingly difficult to do with libclang, due to the fact that + /// it doesn't provide explicit template argument information, except for + /// function template declarations(!?!??!). + /// + /// The only way to do this is manually inspecting the AST and looking for + /// TypeRefs and TemplateRefs inside. This, unfortunately, doesn't work for + /// more complex cases, see the comment on the assertion below. + /// + /// To add insult to injury, the AST itself has structure that doesn't make + /// sense. Sometimes `Foo>` has an AST with nesting like you might + /// expect: `(Foo (Bar (int)))`. 
Other times, the AST we get is completely + /// flat: `(Foo Bar int)`. + /// + /// To see an example of what this method handles: + /// + /// ```c++ + /// template + /// class Incomplete { + /// T p; + /// }; + /// + /// template + /// class Foo { + /// Incomplete bar; + /// }; + /// ``` + /// + /// Finally, template instantiations are always children of the current + /// module. They use their template's definition for their name, so the + /// parent is only useful for ensuring that their layout tests get + /// codegen'd. + fn instantiate_template( + &mut self, + with_id: ItemId, + template: TypeId, + ty: &clang::Type, + location: clang::Cursor, + ) -> Option { + let num_expected_args = + self.resolve_type(template).num_self_template_params(self); + if num_expected_args == 0 { + warn!( + "Tried to instantiate a template for which we could not \ + determine any template parameters" + ); + return None; + } + + let mut args = vec![]; + let mut found_const_arg = false; + let mut children = location.collect_children(); + + if children.iter().all(|c| !c.has_children()) { + // This is insanity... If clang isn't giving us a properly nested + // AST for which template arguments belong to which template we are + // instantiating, we'll need to construct it ourselves. However, + // there is an extra `NamespaceRef, NamespaceRef, ..., TemplateRef` + // representing a reference to the outermost template declaration + // that we need to filter out of the children. We need to do this + // filtering because we already know which template declaration is + // being specialized via the `location`'s type, and if we do not + // filter it out, we'll add an extra layer of template instantiation + // on accident. 
+ let idx = children + .iter() + .position(|c| c.kind() == clang_sys::CXCursor_TemplateRef); + if let Some(idx) = idx { + if children + .iter() + .take(idx) + .all(|c| c.kind() == clang_sys::CXCursor_NamespaceRef) + { + children = children.into_iter().skip(idx + 1).collect(); + } + } + } + + for child in children.iter().rev() { + match child.kind() { + clang_sys::CXCursor_TypeRef | + clang_sys::CXCursor_TypedefDecl | + clang_sys::CXCursor_TypeAliasDecl => { + // The `with_id` ID will potentially end up unused if we give up + // on this type (for example, because it has const value + // template args), so if we pass `with_id` as the parent, it is + // potentially a dangling reference. Instead, use the canonical + // template declaration as the parent. It is already parsed and + // has a known-resolvable `ItemId`. + let ty = Item::from_ty_or_ref( + child.cur_type(), + *child, + Some(template.into()), + self, + ); + args.push(ty); + } + clang_sys::CXCursor_TemplateRef => { + let ( + template_decl_cursor, + template_decl_id, + num_expected_template_args, + ) = self.get_declaration_info_for_template_instantiation( + child, + )?; + + if num_expected_template_args == 0 || + child.has_at_least_num_children( + num_expected_template_args, + ) + { + // Do a happy little parse. See comment in the TypeRef + // match arm about parent IDs. + let ty = Item::from_ty_or_ref( + child.cur_type(), + *child, + Some(template.into()), + self, + ); + args.push(ty); + } else { + // This is the case mentioned in the doc comment where + // clang gives us a flattened AST and we have to + // reconstruct which template arguments go to which + // instantiation :( + let args_len = args.len(); + if args_len < num_expected_template_args { + warn!( + "Found a template instantiation without \ + enough template arguments" + ); + return None; + } + + let mut sub_args: Vec<_> = args + .drain(args_len - num_expected_template_args..) 
+ .collect(); + sub_args.reverse(); + + let sub_name = Some(template_decl_cursor.spelling()); + let sub_inst = TemplateInstantiation::new( + // This isn't guaranteed to be a type that we've + // already finished parsing yet. + template_decl_id.as_type_id_unchecked(), + sub_args, + ); + let sub_kind = + TypeKind::TemplateInstantiation(sub_inst); + let sub_ty = Type::new( + sub_name, + template_decl_cursor + .cur_type() + .fallible_layout(self) + .ok(), + sub_kind, + false, + ); + let sub_id = self.next_item_id(); + let sub_item = Item::new( + sub_id, + None, + None, + self.current_module.into(), + ItemKind::Type(sub_ty), + Some(child.location()), + ); + + // Bypass all the validations in add_item explicitly. + debug!( + "instantiate_template: inserting nested \ + instantiation item: {:?}", + sub_item + ); + self.add_item_to_module(&sub_item); + debug_assert_eq!(sub_id, sub_item.id()); + self.items[sub_id.0] = Some(sub_item); + args.push(sub_id.as_type_id_unchecked()); + } + } + _ => { + warn!( + "Found template arg cursor we can't handle: {:?}", + child + ); + found_const_arg = true; + } + } + } + + if found_const_arg { + // This is a dependently typed template instantiation. That is, an + // instantiation of a template with one or more const values as + // template arguments, rather than only types as template + // arguments. For example, `Foo` versus `Bar`. + // We can't handle these instantiations, so just punt in this + // situation... + warn!( + "Found template instantiated with a const value; \ + bindgen can't handle this kind of template instantiation!" 
+ ); + return None; + } + + if args.len() != num_expected_args { + warn!( + "Found a template with an unexpected number of template \ + arguments" + ); + return None; + } + + args.reverse(); + let type_kind = TypeKind::TemplateInstantiation( + TemplateInstantiation::new(template, args), + ); + let name = ty.spelling(); + let name = if name.is_empty() { None } else { Some(name) }; + let ty = Type::new( + name, + ty.fallible_layout(self).ok(), + type_kind, + ty.is_const(), + ); + let item = Item::new( + with_id, + None, + None, + self.current_module.into(), + ItemKind::Type(ty), + Some(location.location()), + ); + + // Bypass all the validations in add_item explicitly. + debug!("instantiate_template: inserting item: {:?}", item); + self.add_item_to_module(&item); + debug_assert_eq!(with_id, item.id()); + self.items[with_id.0] = Some(item); + Some(with_id.as_type_id_unchecked()) + } + + /// If we have already resolved the type for the given type declaration, + /// return its `ItemId`. Otherwise, return `None`. + pub(crate) fn get_resolved_type( + &self, + decl: &clang::CanonicalTypeDeclaration, + ) -> Option { + self.types + .get(&TypeKey::Declaration(*decl.cursor())) + .or_else(|| { + decl.cursor() + .usr() + .and_then(|usr| self.types.get(&TypeKey::Usr(usr))) + }) + .cloned() + } + + /// Looks up for an already resolved type, either because it's builtin, or + /// because we already have it in the map. 
+ pub(crate) fn builtin_or_resolved_ty( + &mut self, + with_id: ItemId, + parent_id: Option, + ty: &clang::Type, + location: Option, + ) -> Option { + use clang_sys::{CXCursor_TypeAliasTemplateDecl, CXCursor_TypeRef}; + debug!( + "builtin_or_resolved_ty: {:?}, {:?}, {:?}, {:?}", + ty, location, with_id, parent_id + ); + + if let Some(decl) = ty.canonical_declaration(location.as_ref()) { + if let Some(id) = self.get_resolved_type(&decl) { + debug!( + "Already resolved ty {:?}, {:?}, {:?} {:?}", + id, decl, ty, location + ); + // If the declaration already exists, then either: + // + // * the declaration is a template declaration of some sort, + // and we are looking at an instantiation or specialization + // of it, or + // * we have already parsed and resolved this type, and + // there's nothing left to do. + if let Some(location) = location { + if decl.cursor().is_template_like() && + *ty != decl.cursor().cur_type() + { + // For specialized type aliases, there's no way to get the + // template parameters as of this writing (for a struct + // specialization we wouldn't be in this branch anyway). + // + // Explicitly return `None` if there aren't any + // unspecialized parameters (contains any `TypeRef`) so we + // resolve the canonical type if there is one and it's + // exposed. + // + // This is _tricky_, I know :( + if decl.cursor().kind() == + CXCursor_TypeAliasTemplateDecl && + !location.contains_cursor(CXCursor_TypeRef) && + ty.canonical_type().is_valid_and_exposed() + { + return None; + } + + return self + .instantiate_template(with_id, id, ty, location) + .or(Some(id)); + } + } + + return Some(self.build_ty_wrapper(with_id, id, parent_id, ty)); + } + } + + debug!("Not resolved, maybe builtin?"); + self.build_builtin_ty(ty) + } + + /// Make a new item that is a resolved type reference to the `wrapped_id`. + /// + /// This is unfortunately a lot of bloat, but is needed to properly track + /// constness et al. 
+ /// + /// We should probably make the constness tracking separate, so it doesn't + /// bloat that much, but hey, we already bloat the heck out of builtin + /// types. + pub(crate) fn build_ty_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + ) -> TypeId { + self.build_wrapper(with_id, wrapped_id, parent_id, ty, ty.is_const()) + } + + /// A wrapper over a type that adds a const qualifier explicitly. + /// + /// Needed to handle const methods in C++, wrapping the type . + pub(crate) fn build_const_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + ) -> TypeId { + self.build_wrapper( + with_id, wrapped_id, parent_id, ty, /* is_const = */ true, + ) + } + + fn build_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + is_const: bool, + ) -> TypeId { + let spelling = ty.spelling(); + let layout = ty.fallible_layout(self).ok(); + let location = ty.declaration().location(); + let type_kind = TypeKind::ResolvedTypeRef(wrapped_id); + let ty = Type::new(Some(spelling), layout, type_kind, is_const); + let item = Item::new( + with_id, + None, + None, + parent_id.unwrap_or_else(|| self.current_module.into()), + ItemKind::Type(ty), + Some(location), + ); + self.add_builtin_item(item); + with_id.as_type_id_unchecked() + } + + /// Returns the next item ID to be used for an item. 
+ pub(crate) fn next_item_id(&mut self) -> ItemId { + let ret = ItemId(self.items.len()); + self.items.push(None); + ret + } + + fn build_builtin_ty(&mut self, ty: &clang::Type) -> Option { + use clang_sys::*; + let type_kind = match ty.kind() { + CXType_NullPtr => TypeKind::NullPtr, + CXType_Void => TypeKind::Void, + CXType_Bool => TypeKind::Int(IntKind::Bool), + CXType_Int => TypeKind::Int(IntKind::Int), + CXType_UInt => TypeKind::Int(IntKind::UInt), + CXType_Char_S => TypeKind::Int(IntKind::Char { is_signed: true }), + CXType_Char_U => TypeKind::Int(IntKind::Char { is_signed: false }), + CXType_SChar => TypeKind::Int(IntKind::SChar), + CXType_UChar => TypeKind::Int(IntKind::UChar), + CXType_Short => TypeKind::Int(IntKind::Short), + CXType_UShort => TypeKind::Int(IntKind::UShort), + CXType_WChar => TypeKind::Int(IntKind::WChar), + CXType_Char16 => TypeKind::Int(IntKind::U16), + CXType_Char32 => TypeKind::Int(IntKind::U32), + CXType_Long => TypeKind::Int(IntKind::Long), + CXType_ULong => TypeKind::Int(IntKind::ULong), + CXType_LongLong => TypeKind::Int(IntKind::LongLong), + CXType_ULongLong => TypeKind::Int(IntKind::ULongLong), + CXType_Int128 => TypeKind::Int(IntKind::I128), + CXType_UInt128 => TypeKind::Int(IntKind::U128), + CXType_Float => TypeKind::Float(FloatKind::Float), + CXType_Double => TypeKind::Float(FloatKind::Double), + CXType_LongDouble => TypeKind::Float(FloatKind::LongDouble), + CXType_Float128 => TypeKind::Float(FloatKind::Float128), + CXType_Complex => { + let float_type = + ty.elem_type().expect("Not able to resolve complex type?"); + let float_kind = match float_type.kind() { + CXType_Float => FloatKind::Float, + CXType_Double => FloatKind::Double, + CXType_LongDouble => FloatKind::LongDouble, + CXType_Float128 => FloatKind::Float128, + _ => panic!( + "Non floating-type complex? 
{:?}, {:?}", + ty, float_type, + ), + }; + TypeKind::Complex(float_kind) + } + _ => return None, + }; + + let spelling = ty.spelling(); + let is_const = ty.is_const(); + let layout = ty.fallible_layout(self).ok(); + let location = ty.declaration().location(); + let ty = Type::new(Some(spelling), layout, type_kind, is_const); + let id = self.next_item_id(); + let item = Item::new( + id, + None, + None, + self.root_module.into(), + ItemKind::Type(ty), + Some(location), + ); + self.add_builtin_item(item); + Some(id.as_type_id_unchecked()) + } + + /// Get the current Clang translation unit that is being processed. + pub(crate) fn translation_unit(&self) -> &clang::TranslationUnit { + &self.translation_unit + } + + /// Have we parsed the macro named `macro_name` already? + pub(crate) fn parsed_macro(&self, macro_name: &[u8]) -> bool { + self.parsed_macros.contains_key(macro_name) + } + + /// Get the currently parsed macros. + pub(crate) fn parsed_macros( + &self, + ) -> &StdHashMap, cexpr::expr::EvalResult> { + debug_assert!(!self.in_codegen_phase()); + &self.parsed_macros + } + + /// Mark the macro named `macro_name` as parsed. + pub(crate) fn note_parsed_macro( + &mut self, + id: Vec, + value: cexpr::expr::EvalResult, + ) { + self.parsed_macros.insert(id, value); + } + + /// Are we in the codegen phase? + pub(crate) fn in_codegen_phase(&self) -> bool { + self.in_codegen + } + + /// Mark the type with the given `name` as replaced by the type with ID + /// `potential_ty`. + /// + /// Replacement types are declared using the `replaces="xxx"` annotation, + /// and implies that the original type is hidden. 
+ pub(crate) fn replace(&mut self, name: &[String], potential_ty: ItemId) { + match self.replacements.entry(name.into()) { + Entry::Vacant(entry) => { + debug!( + "Defining replacement for {:?} as {:?}", + name, potential_ty + ); + entry.insert(potential_ty); + } + Entry::Occupied(occupied) => { + warn!( + "Replacement for {:?} already defined as {:?}; \ + ignoring duplicate replacement definition as {:?}", + name, + occupied.get(), + potential_ty + ); + } + } + } + + /// Has the item with the given `name` and `id` been replaced by another + /// type? + pub(crate) fn is_replaced_type>( + &self, + path: &[String], + id: Id, + ) -> bool { + let id = id.into(); + matches!(self.replacements.get(path), Some(replaced_by) if *replaced_by != id) + } + + /// Is the type with the given `name` marked as opaque? + pub(crate) fn opaque_by_name(&self, path: &[String]) -> bool { + debug_assert!( + self.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.options.opaque_types.matches(path[1..].join("::")) + } + + /// Get the options used to configure this bindgen context. + pub(crate) fn options(&self) -> &BindgenOptions { + &self.options + } + + /// Tokenizes a namespace cursor in order to get the name and kind of the + /// namespace. + fn tokenize_namespace( + &self, + cursor: &clang::Cursor, + ) -> (Option, ModuleKind) { + assert_eq!( + cursor.kind(), + ::clang_sys::CXCursor_Namespace, + "Be a nice person" + ); + + let mut module_name = None; + let spelling = cursor.spelling(); + if !spelling.is_empty() { + module_name = Some(spelling) + } + + let mut kind = ModuleKind::Normal; + let mut looking_for_name = false; + for token in cursor.tokens().iter() { + match token.spelling() { + b"inline" => { + debug_assert!( + kind != ModuleKind::Inline, + "Multiple inline keywords?" + ); + kind = ModuleKind::Inline; + // When hitting a nested inline namespace we get a spelling + // that looks like ["inline", "foo"]. Deal with it properly. 
+ looking_for_name = true; + } + // The double colon allows us to handle nested namespaces like + // namespace foo::bar { } + // + // libclang still gives us two namespace cursors, which is cool, + // but the tokenization of the second begins with the double + // colon. That's ok, so we only need to handle the weird + // tokenization here. + b"namespace" | b"::" => { + looking_for_name = true; + } + b"{" => { + // This should be an anonymous namespace. + assert!(looking_for_name); + break; + } + name => { + if looking_for_name { + if module_name.is_none() { + module_name = Some( + String::from_utf8_lossy(name).into_owned(), + ); + } + break; + } else { + // This is _likely_, but not certainly, a macro that's + // been placed just before the namespace keyword. + // Unfortunately, clang tokens don't let us easily see + // through the ifdef tokens, so we don't know what this + // token should really be. Instead of panicking though, + // we warn the user that we assumed the token was blank, + // and then move on. + // + // See also https://github.com/rust-lang/rust-bindgen/issues/1676. + warn!( + "Ignored unknown namespace prefix '{}' at {:?} in {:?}", + String::from_utf8_lossy(name), + token, + cursor + ); + } + } + } + } + + (module_name, kind) + } + + /// Given a CXCursor_Namespace cursor, return the item ID of the + /// corresponding module, or create one on the fly. 
+ pub(crate) fn module(&mut self, cursor: clang::Cursor) -> ModuleId { + use clang_sys::*; + assert_eq!(cursor.kind(), CXCursor_Namespace, "Be a nice person"); + let cursor = cursor.canonical(); + if let Some(id) = self.modules.get(&cursor) { + return *id; + } + + let (module_name, kind) = self.tokenize_namespace(&cursor); + + let module_id = self.next_item_id(); + let module = Module::new(module_name, kind); + let module = Item::new( + module_id, + None, + None, + self.current_module.into(), + ItemKind::Module(module), + Some(cursor.location()), + ); + + let module_id = module.id().as_module_id_unchecked(); + self.modules.insert(cursor, module_id); + + self.add_item(module, None, None); + + module_id + } + + /// Start traversing the module with the given `module_id`, invoke the + /// callback `cb`, and then return to traversing the original module. + pub(crate) fn with_module(&mut self, module_id: ModuleId, cb: F) + where + F: FnOnce(&mut Self), + { + debug_assert!(self.resolve_item(module_id).kind().is_module(), "Wat"); + + let previous_id = self.current_module; + self.current_module = module_id; + + cb(self); + + self.current_module = previous_id; + } + + /// Iterate over all (explicitly or transitively) allowlisted items. + /// + /// If no items are explicitly allowlisted, then all items are considered + /// allowlisted. + pub(crate) fn allowlisted_items(&self) -> &ItemSet { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + + self.allowlisted.as_ref().unwrap() + } + + /// Check whether a particular blocklisted type implements a trait or not. + /// Results may be cached. 
+ pub(crate) fn blocklisted_type_implements_trait( + &self, + item: &Item, + derive_trait: DeriveTrait, + ) -> CanDerive { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + + *self + .blocklisted_types_implement_traits + .borrow_mut() + .entry(derive_trait) + .or_default() + .entry(item.id()) + .or_insert_with(|| { + item.expect_type() + .name() + .and_then(|name| { + if self.options.parse_callbacks.is_empty() { + // Sized integer types from get mapped to Rust primitive + // types regardless of whether they are blocklisted, so ensure that + // standard traits are considered derivable for them too. + if self.is_stdint_type(name) { + Some(CanDerive::Yes) + } else { + Some(CanDerive::No) + } + } else { + self.options.last_callback(|cb| { + cb.blocklisted_type_implements_trait( + name, + derive_trait, + ) + }) + } + }) + .unwrap_or(CanDerive::No) + }) + } + + /// Is the given type a type from that corresponds to a Rust primitive type? + pub(crate) fn is_stdint_type(&self, name: &str) -> bool { + match name { + "int8_t" | "uint8_t" | "int16_t" | "uint16_t" | "int32_t" | + "uint32_t" | "int64_t" | "uint64_t" | "uintptr_t" | + "intptr_t" | "ptrdiff_t" => true, + "size_t" | "ssize_t" => self.options.size_t_is_usize, + _ => false, + } + } + + /// Get a reference to the set of items we should generate. + pub(crate) fn codegen_items(&self) -> &ItemSet { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + self.codegen_items.as_ref().unwrap() + } + + /// Compute the allowlisted items set and populate `self.allowlisted`. + fn compute_allowlisted_and_codegen_items(&mut self) { + assert!(self.in_codegen_phase()); + assert!(self.current_module == self.root_module); + assert!(self.allowlisted.is_none()); + let _t = self.timer("compute_allowlisted_and_codegen_items"); + + let roots = { + let mut roots = self + .items() + // Only consider roots that are enabled for codegen. 
+ .filter(|&(_, item)| item.is_enabled_for_codegen(self)) + .filter(|&(_, item)| { + // If nothing is explicitly allowlisted, then everything is fair + // game. + if self.options().allowlisted_types.is_empty() && + self.options().allowlisted_functions.is_empty() && + self.options().allowlisted_vars.is_empty() && + self.options().allowlisted_files.is_empty() + { + return true; + } + + // If this is a type that explicitly replaces another, we assume + // you know what you're doing. + if item.annotations().use_instead_of().is_some() { + return true; + } + + // Items with a source location in an explicitly allowlisted file + // are always included. + if !self.options().allowlisted_files.is_empty() { + if let Some(location) = item.location() { + let (file, _, _, _) = location.location(); + if let Some(filename) = file.name() { + if self + .options() + .allowlisted_files + .matches(filename) + { + return true; + } + } + } + } + + let name = item.path_for_allowlisting(self)[1..].join("::"); + debug!("allowlisted_items: testing {:?}", name); + match *item.kind() { + ItemKind::Module(..) => true, + ItemKind::Function(_) => { + self.options().allowlisted_functions.matches(&name) + } + ItemKind::Var(_) => { + self.options().allowlisted_vars.matches(&name) + } + ItemKind::Type(ref ty) => { + if self.options().allowlisted_types.matches(&name) { + return true; + } + + // Auto-allowlist types that don't need code + // generation if not allowlisting recursively, to + // make the #[derive] analysis not be lame. + if !self.options().allowlist_recursively { + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Pointer(..) | + TypeKind::Reference(..) | + TypeKind::Function(..) | + TypeKind::ResolvedTypeRef(..) 
| + TypeKind::Opaque | + TypeKind::TypeParam => return true, + _ => {} + } + if self.is_stdint_type(&name) { + return true; + } + } + + // Unnamed top-level enums are special and we + // allowlist them via the `allowlisted_vars` filter, + // since they're effectively top-level constants, + // and there's no way for them to be referenced + // consistently. + let parent = self.resolve_item(item.parent_id()); + if !parent.is_module() { + return false; + } + + let enum_ = match *ty.kind() { + TypeKind::Enum(ref e) => e, + _ => return false, + }; + + if ty.name().is_some() { + return false; + } + + let mut prefix_path = + parent.path_for_allowlisting(self).clone(); + enum_.variants().iter().any(|variant| { + prefix_path.push( + variant.name_for_allowlisting().into(), + ); + let name = prefix_path[1..].join("::"); + prefix_path.pop().unwrap(); + self.options().allowlisted_vars.matches(name) + }) + } + } + }) + .map(|(id, _)| id) + .collect::>(); + + // The reversal preserves the expected ordering of traversal, + // resulting in more stable-ish bindgen-generated names for + // anonymous types (like unions). + roots.reverse(); + roots + }; + + let allowlisted_items_predicate = + if self.options().allowlist_recursively { + traversal::all_edges + } else { + // Only follow InnerType edges from the allowlisted roots. + // Such inner types (e.g. anonymous structs/unions) are + // always emitted by codegen, and they need to be allowlisted + // to make sure they are processed by e.g. the derive analysis. 
+ traversal::only_inner_type_edges + }; + + let allowlisted = AllowlistedItemsTraversal::new( + self, + roots.clone(), + allowlisted_items_predicate, + ) + .collect::(); + + let codegen_items = if self.options().allowlist_recursively { + AllowlistedItemsTraversal::new( + self, + roots, + traversal::codegen_edges, + ) + .collect::() + } else { + allowlisted.clone() + }; + + self.allowlisted = Some(allowlisted); + self.codegen_items = Some(codegen_items); + + for item in self.options().allowlisted_functions.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-function", self); + } + + for item in self.options().allowlisted_vars.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-var", self); + } + + for item in self.options().allowlisted_types.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-type", self); + } + } + + /// Convenient method for getting the prefix to use for most traits in + /// codegen depending on the `use_core` option. + pub(crate) fn trait_prefix(&self) -> Ident { + if self.options().use_core { + self.rust_ident_raw("core") + } else { + self.rust_ident_raw("std") + } + } + + /// Call if a bindgen complex is generated + pub(crate) fn generated_bindgen_complex(&self) { + self.generated_bindgen_complex.set(true) + } + + /// Whether we need to generate the bindgen complex type + pub(crate) fn need_bindgen_complex_type(&self) -> bool { + self.generated_bindgen_complex.get() + } + + /// Compute which `enum`s have an associated `typedef` definition. + fn compute_enum_typedef_combos(&mut self) { + let _t = self.timer("compute_enum_typedef_combos"); + assert!(self.enum_typedef_combos.is_none()); + + let mut enum_typedef_combos = HashSet::default(); + for item in &self.items { + if let Some(ItemKind::Module(module)) = + item.as_ref().map(Item::kind) + { + // Find typedefs in this module, and build set of their names. 
+ let mut names_of_typedefs = HashSet::default(); + for child_id in module.children() { + if let Some(ItemKind::Type(ty)) = + self.items[child_id.0].as_ref().map(Item::kind) + { + if let (Some(name), TypeKind::Alias(type_id)) = + (ty.name(), ty.kind()) + { + // We disregard aliases that refer to the enum + // itself, such as in `typedef enum { ... } Enum;`. + if type_id + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self) + .expect_type() + .is_int() + { + names_of_typedefs.insert(name); + } + } + } + } + + // Find enums in this module, and record the ID of each one that + // has a typedef. + for child_id in module.children() { + if let Some(ItemKind::Type(ty)) = + self.items[child_id.0].as_ref().map(Item::kind) + { + if let (Some(name), true) = (ty.name(), ty.is_enum()) { + if names_of_typedefs.contains(name) { + enum_typedef_combos.insert(*child_id); + } + } + } + } + } + } + + self.enum_typedef_combos = Some(enum_typedef_combos); + } + + /// Look up whether `id` refers to an `enum` whose underlying type is + /// defined by a `typedef`. + pub(crate) fn is_enum_typedef_combo(&self, id: ItemId) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute enum_typedef_combos when we enter codegen", + ); + self.enum_typedef_combos.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive debug. + fn compute_cannot_derive_debug(&mut self) { + let _t = self.timer("compute_cannot_derive_debug"); + assert!(self.cannot_derive_debug.is_none()); + if self.options.derive_debug { + self.cannot_derive_debug = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Debug, + )))); + } + } + + /// Look up whether the item with `id` can + /// derive debug or not. 
+ pub(crate) fn lookup_can_derive_debug>( + &self, + id: Id, + ) -> bool { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_debug when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive debug or not. + !self.cannot_derive_debug.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive default. + fn compute_cannot_derive_default(&mut self) { + let _t = self.timer("compute_cannot_derive_default"); + assert!(self.cannot_derive_default.is_none()); + if self.options.derive_default { + self.cannot_derive_default = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Default, + )))); + } + } + + /// Look up whether the item with `id` can + /// derive default or not. + pub(crate) fn lookup_can_derive_default>( + &self, + id: Id, + ) -> bool { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_default when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive default or not. + !self.cannot_derive_default.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive copy. + fn compute_cannot_derive_copy(&mut self) { + let _t = self.timer("compute_cannot_derive_copy"); + assert!(self.cannot_derive_copy.is_none()); + self.cannot_derive_copy = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Copy, + )))); + } + + /// Compute whether we can derive hash. + fn compute_cannot_derive_hash(&mut self) { + let _t = self.timer("compute_cannot_derive_hash"); + assert!(self.cannot_derive_hash.is_none()); + if self.options.derive_hash { + self.cannot_derive_hash = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Hash, + )))); + } + } + + /// Look up whether the item with `id` can + /// derive hash or not. 
+ pub(crate) fn lookup_can_derive_hash>( + &self, + id: Id, + ) -> bool { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_debug when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive hash or not. + !self.cannot_derive_hash.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive PartialOrd, PartialEq or Eq. + fn compute_cannot_derive_partialord_partialeq_or_eq(&mut self) { + let _t = self.timer("compute_cannot_derive_partialord_partialeq_or_eq"); + assert!(self.cannot_derive_partialeq_or_partialord.is_none()); + if self.options.derive_partialord || + self.options.derive_partialeq || + self.options.derive_eq + { + self.cannot_derive_partialeq_or_partialord = + Some(analyze::(( + self, + DeriveTrait::PartialEqOrPartialOrd, + ))); + } + } + + /// Look up whether the item with `id` can derive `Partial{Eq,Ord}`. + pub(crate) fn lookup_can_derive_partialeq_or_partialord< + Id: Into, + >( + &self, + id: Id, + ) -> CanDerive { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_partialeq_or_partialord when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive partialeq or not. + self.cannot_derive_partialeq_or_partialord + .as_ref() + .unwrap() + .get(&id) + .cloned() + .unwrap_or(CanDerive::Yes) + } + + /// Look up whether the item with `id` can derive `Copy` or not. + pub(crate) fn lookup_can_derive_copy>( + &self, + id: Id, + ) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute can_derive_debug when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive `Copy` or not. + let id = id.into(); + + !self.lookup_has_type_param_in_array(id) && + !self.cannot_derive_copy.as_ref().unwrap().contains(&id) + } + + /// Compute whether the type has type parameter in array. 
+ fn compute_has_type_param_in_array(&mut self) { + let _t = self.timer("compute_has_type_param_in_array"); + assert!(self.has_type_param_in_array.is_none()); + self.has_type_param_in_array = + Some(analyze::(self)); + } + + /// Look up whether the item with `id` has type parameter in array or not. + pub(crate) fn lookup_has_type_param_in_array>( + &self, + id: Id, + ) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute has array when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` has + // type parameter in array or not. + self.has_type_param_in_array + .as_ref() + .unwrap() + .contains(&id.into()) + } + + /// Compute whether the type has float. + fn compute_has_float(&mut self) { + let _t = self.timer("compute_has_float"); + assert!(self.has_float.is_none()); + if self.options.derive_eq || self.options.derive_ord { + self.has_float = Some(analyze::(self)); + } + } + + /// Look up whether the item with `id` has array or not. + pub(crate) fn lookup_has_float>(&self, id: Id) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute has float when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` has + // float or not. + self.has_float.as_ref().unwrap().contains(&id.into()) + } + + /// Check if `--no-partialeq` flag is enabled for this item. + pub(crate) fn no_partialeq_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_partialeq_types.matches(name) + } + + /// Check if `--no-copy` flag is enabled for this item. + pub(crate) fn no_copy_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_copy_types.matches(name) + } + + /// Check if `--no-debug` flag is enabled for this item. 
+ pub(crate) fn no_debug_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_debug_types.matches(name) + } + + /// Check if `--no-default` flag is enabled for this item. + pub(crate) fn no_default_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_default_types.matches(name) + } + + /// Check if `--no-hash` flag is enabled for this item. + pub(crate) fn no_hash_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_hash_types.matches(name) + } + + /// Check if `--must-use-type` flag is enabled for this item. + pub(crate) fn must_use_type_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().must_use_types.matches(name) + } + + /// Wrap some tokens in an `unsafe` block if the `--wrap-unsafe-ops` option is enabled. + pub(crate) fn wrap_unsafe_ops(&self, tokens: impl ToTokens) -> TokenStream { + if self.options.wrap_unsafe_ops { + quote!(unsafe { #tokens }) + } else { + tokens.into_token_stream() + } + } + + /// Get the suffix to be added to `static` functions if the `--wrap-static-fns` option is + /// enabled. + pub(crate) fn wrap_static_fns_suffix(&self) -> &str { + self.options() + .wrap_static_fns_suffix + .as_deref() + .unwrap_or(crate::DEFAULT_NON_EXTERN_FNS_SUFFIX) + } +} + +/// A builder struct for configuring item resolution options. +#[derive(Debug, Copy, Clone)] +pub(crate) struct ItemResolver { + id: ItemId, + through_type_refs: bool, + through_type_aliases: bool, +} + +impl ItemId { + /// Create an `ItemResolver` from this item ID. 
+ pub(crate) fn into_resolver(self) -> ItemResolver { + self.into() + } +} + +impl From for ItemResolver +where + T: Into, +{ + fn from(id: T) -> ItemResolver { + ItemResolver::new(id) + } +} + +impl ItemResolver { + /// Construct a new `ItemResolver` from the given ID. + pub(crate) fn new>(id: Id) -> ItemResolver { + let id = id.into(); + ItemResolver { + id, + through_type_refs: false, + through_type_aliases: false, + } + } + + /// Keep resolving through `Type::TypeRef` items. + pub(crate) fn through_type_refs(mut self) -> ItemResolver { + self.through_type_refs = true; + self + } + + /// Keep resolving through `Type::Alias` items. + pub(crate) fn through_type_aliases(mut self) -> ItemResolver { + self.through_type_aliases = true; + self + } + + /// Finish configuring and perform the actual item resolution. + pub(crate) fn resolve(self, ctx: &BindgenContext) -> &Item { + assert!(ctx.collected_typerefs()); + + let mut id = self.id; + let mut seen_ids = HashSet::default(); + loop { + let item = ctx.resolve_item(id); + + // Detect cycles and bail out. These can happen in certain cases + // involving incomplete qualified dependent types (#2085). + if !seen_ids.insert(id) { + return item; + } + + let ty_kind = item.as_type().map(|t| t.kind()); + match ty_kind { + Some(&TypeKind::ResolvedTypeRef(next_id)) + if self.through_type_refs => + { + id = next_id.into(); + } + // We intentionally ignore template aliases here, as they are + // more complicated, and don't represent a simple renaming of + // some type. + Some(&TypeKind::Alias(next_id)) + if self.through_type_aliases => + { + id = next_id.into(); + } + _ => return item, + } + } + } +} + +/// A type that we are in the middle of parsing. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct PartialType { + decl: Cursor, + // Just an ItemId, and not a TypeId, because we haven't finished this type + // yet, so there's still time for things to go wrong. 
+ id: ItemId, +} + +impl PartialType { + /// Construct a new `PartialType`. + pub(crate) fn new(decl: Cursor, id: ItemId) -> PartialType { + // assert!(decl == decl.canonical()); + PartialType { decl, id } + } + + /// The cursor pointing to this partial type's declaration location. + pub(crate) fn decl(&self) -> &Cursor { + &self.decl + } + + /// The item ID allocated for this type. This is *NOT* a key for an entry in + /// the context's item set yet! + pub(crate) fn id(&self) -> ItemId { + self.id + } +} + +impl TemplateParameters for PartialType { + fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { + // Maybe at some point we will eagerly parse named types, but for now we + // don't and this information is unavailable. + vec![] + } + + fn num_self_template_params(&self, _ctx: &BindgenContext) -> usize { + // Wouldn't it be nice if libclang would reliably give us this + // information‽ + match self.decl().kind() { + clang_sys::CXCursor_ClassTemplate | + clang_sys::CXCursor_FunctionTemplate | + clang_sys::CXCursor_TypeAliasTemplateDecl => { + let mut num_params = 0; + self.decl().visit(|c| { + match c.kind() { + clang_sys::CXCursor_TemplateTypeParameter | + clang_sys::CXCursor_TemplateTemplateParameter | + clang_sys::CXCursor_NonTypeTemplateParameter => { + num_params += 1; + } + _ => {} + }; + clang_sys::CXChildVisit_Continue + }); + num_params + } + _ => 0, + } + } +} + +fn unused_regex_diagnostic(item: &str, name: &str, _ctx: &BindgenContext) { + warn!("unused option: {} {}", name, item); + + #[cfg(feature = "experimental")] + if _ctx.options().emit_diagnostics { + use crate::diagnostics::{Diagnostic, Level}; + + Diagnostic::default() + .with_title( + format!("Unused regular expression: `{}`.", item), + Level::Warn, + ) + .add_annotation( + format!("This regular expression was passed to `{}`.", name), + Level::Note, + ) + .display(); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/derive.rs 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/derive.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/derive.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/derive.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,135 @@ +//! Traits for determining whether we can derive traits for a thing or not. +//! +//! These traits tend to come in pairs: +//! +//! 1. A "trivial" version, whose implementations aren't allowed to recursively +//! look at other types or the results of fix point analyses. +//! +//! 2. A "normal" version, whose implementations simply query the results of a +//! fix point analysis. +//! +//! The former is used by the analyses when creating the results queried by the +//! second. + +use super::context::BindgenContext; + +use std::cmp; +use std::ops; + +/// A trait that encapsulates the logic for whether or not we can derive `Debug` +/// for a given thing. +pub(crate) trait CanDeriveDebug { + /// Return `true` if `Debug` can be derived for this thing, `false` + /// otherwise. + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Copy` +/// for a given thing. +pub(crate) trait CanDeriveCopy { + /// Return `true` if `Copy` can be derived for this thing, `false` + /// otherwise. + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `Default` for a given thing. +pub(crate) trait CanDeriveDefault { + /// Return `true` if `Default` can be derived for this thing, `false` + /// otherwise. + fn can_derive_default(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Hash` +/// for a given thing. +pub(crate) trait CanDeriveHash { + /// Return `true` if `Hash` can be derived for this thing, `false` + /// otherwise. 
+ fn can_derive_hash(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `PartialEq` for a given thing. +pub(crate) trait CanDerivePartialEq { + /// Return `true` if `PartialEq` can be derived for this thing, `false` + /// otherwise. + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `PartialOrd` for a given thing. +pub(crate) trait CanDerivePartialOrd { + /// Return `true` if `PartialOrd` can be derived for this thing, `false` + /// otherwise. + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Eq` +/// for a given thing. +pub(crate) trait CanDeriveEq { + /// Return `true` if `Eq` can be derived for this thing, `false` otherwise. + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Ord` +/// for a given thing. +pub(crate) trait CanDeriveOrd { + /// Return `true` if `Ord` can be derived for this thing, `false` otherwise. + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool; +} + +/// Whether it is possible or not to automatically derive trait for an item. +/// +/// ```ignore +/// No +/// ^ +/// | +/// Manually +/// ^ +/// | +/// Yes +/// ``` +/// +/// Initially we assume that we can derive trait for all types and then +/// update our understanding as we learn more about each type. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum CanDerive { + /// Yes, we can derive automatically. + Yes, + + /// The only thing that stops us from automatically deriving is that + /// array with more than maximum number of elements is used. + /// + /// This means we probably can "manually" implement such trait. + Manually, + + /// No, we cannot. 
+ No, +} + +impl Default for CanDerive { + fn default() -> CanDerive { + CanDerive::Yes + } +} + +impl CanDerive { + /// Take the least upper bound of `self` and `rhs`. + pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for CanDerive { + type Output = Self; + + fn bitor(self, rhs: Self) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for CanDerive { + fn bitor_assign(&mut self, rhs: Self) { + *self = self.join(rhs) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/dot.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/dot.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/dot.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/dot.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,86 @@ +//! Generating Graphviz `dot` files from our IR. + +use super::context::{BindgenContext, ItemId}; +use super::traversal::Trace; +use std::fs::File; +use std::io::{self, Write}; +use std::path::Path; + +/// A trait for anything that can write attributes as `` rows to a dot +/// file. +pub(crate) trait DotAttributes { + /// Write this thing's attributes to the given output. Each attribute must + /// be its own `...`. + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write; +} + +/// Write a graphviz dot file containing our IR. +pub(crate) fn write_dot_file

(ctx: &BindgenContext, path: P) -> io::Result<()> +where + P: AsRef, +{ + let file = File::create(path)?; + let mut dot_file = io::BufWriter::new(file); + writeln!(&mut dot_file, "digraph {{")?; + + let mut err: Option> = None; + + for (id, item) in ctx.items() { + let is_allowlisted = ctx.allowlisted_items().contains(&id); + + writeln!( + &mut dot_file, + r#"{} [fontname="courier", color={}, label=<

"#, + id.as_usize(), + if is_allowlisted { "black" } else { "gray" } + )?; + item.dot_attributes(ctx, &mut dot_file)?; + writeln!(&mut dot_file, r#"
>];"#)?; + + item.trace( + ctx, + &mut |sub_id: ItemId, edge_kind| { + if err.is_some() { + return; + } + + match writeln!( + &mut dot_file, + "{} -> {} [label={:?}, color={}];", + id.as_usize(), + sub_id.as_usize(), + edge_kind, + if is_allowlisted { "black" } else { "gray" } + ) { + Ok(_) => {} + Err(e) => err = Some(Err(e)), + } + }, + &(), + ); + + if let Some(err) = err { + return err; + } + + if let Some(module) = item.as_module() { + for child in module.children() { + writeln!( + &mut dot_file, + "{} -> {} [style=dotted, color=gray]", + item.id().as_usize(), + child.as_usize() + )?; + } + } + } + + writeln!(&mut dot_file, "}}")?; + Ok(()) +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/enum_ty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/enum_ty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/enum_ty.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/enum_ty.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,323 @@ +//! Intermediate representation for C/C++ enumerations. + +use super::super::codegen::EnumVariation; +use super::context::{BindgenContext, TypeId}; +use super::item::Item; +use super::ty::{Type, TypeKind}; +use crate::clang; +use crate::ir::annotations::Annotations; +use crate::parse::ParseError; +use crate::regex_set::RegexSet; + +/// An enum representing custom handling that can be given to a variant. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum EnumVariantCustomBehavior { + /// This variant will be a module containing constants. + ModuleConstify, + /// This variant will be constified, that is, forced to generate a constant. + Constify, + /// This variant will be hidden entirely from the resulting enum. + Hide, +} + +/// A C/C++ enumeration. +#[derive(Debug)] +pub(crate) struct Enum { + /// The representation used for this enum; it should be an `IntKind` type or + /// an alias to one. 
+ /// + /// It's `None` if the enum is a forward declaration and isn't defined + /// anywhere else, see `tests/headers/func_ptr_in_struct.h`. + repr: Option, + + /// The different variants, with explicit values. + variants: Vec, +} + +impl Enum { + /// Construct a new `Enum` with the given representation and variants. + pub(crate) fn new( + repr: Option, + variants: Vec, + ) -> Self { + Enum { repr, variants } + } + + /// Get this enumeration's representation. + pub(crate) fn repr(&self) -> Option { + self.repr + } + + /// Get this enumeration's variants. + pub(crate) fn variants(&self) -> &[EnumVariant] { + &self.variants + } + + /// Construct an enumeration from the given Clang type. + pub(crate) fn from_ty( + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + debug!("Enum::from_ty {:?}", ty); + + if ty.kind() != CXType_Enum { + return Err(ParseError::Continue); + } + + let declaration = ty.declaration().canonical(); + let repr = declaration + .enum_type() + .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok()); + let mut variants = vec![]; + + let variant_ty = + repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx)); + let is_bool = variant_ty.map_or(false, Type::is_bool); + + // Assume signedness since the default type by the C standard is an int. + let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() { + TypeKind::Int(ref int_kind) => int_kind.is_signed(), + ref other => { + panic!("Since when enums can be non-integers? 
{:?}", other) + } + }); + + let type_name = ty.spelling(); + let type_name = if type_name.is_empty() { + None + } else { + Some(type_name) + }; + let type_name = type_name.as_deref(); + + let definition = declaration.definition().unwrap_or(declaration); + definition.visit(|cursor| { + if cursor.kind() == CXCursor_EnumConstantDecl { + let value = if is_bool { + cursor.enum_val_boolean().map(EnumVariantValue::Boolean) + } else if is_signed { + cursor.enum_val_signed().map(EnumVariantValue::Signed) + } else { + cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned) + }; + if let Some(val) = value { + let name = cursor.spelling(); + let annotations = Annotations::new(&cursor); + let custom_behavior = ctx + .options() + .last_callback(|callbacks| { + callbacks + .enum_variant_behavior(type_name, &name, val) + }) + .or_else(|| { + let annotations = annotations.as_ref()?; + if annotations.hide() { + Some(EnumVariantCustomBehavior::Hide) + } else if annotations.constify_enum_variant() { + Some(EnumVariantCustomBehavior::Constify) + } else { + None + } + }); + + let new_name = ctx + .options() + .last_callback(|callbacks| { + callbacks.enum_variant_name(type_name, &name, val) + }) + .or_else(|| { + annotations + .as_ref()? + .use_instead_of()? + .last() + .cloned() + }) + .unwrap_or_else(|| name.clone()); + + let comment = cursor.raw_comment(); + variants.push(EnumVariant::new( + new_name, + name, + comment, + val, + custom_behavior, + )); + } + } + CXChildVisit_Continue + }); + Ok(Enum::new(repr, variants)) + } + + fn is_matching_enum( + &self, + ctx: &BindgenContext, + enums: &RegexSet, + item: &Item, + ) -> bool { + let path = item.path_for_allowlisting(ctx); + let enum_ty = item.expect_type(); + + if enums.matches(path[1..].join("::")) { + return true; + } + + // Test the variants if the enum is anonymous. 
+ if enum_ty.name().is_some() { + return false; + } + + self.variants().iter().any(|v| enums.matches(v.name())) + } + + /// Returns the final representation of the enum. + pub(crate) fn computed_enum_variation( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> EnumVariation { + // ModuleConsts has higher precedence before Rust in order to avoid + // problems with overlapping match patterns. + if self.is_matching_enum( + ctx, + &ctx.options().constified_enum_modules, + item, + ) { + EnumVariation::ModuleConsts + } else if self.is_matching_enum( + ctx, + &ctx.options().bitfield_enums, + item, + ) { + EnumVariation::NewType { + is_bitfield: true, + is_global: false, + } + } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item) + { + EnumVariation::NewType { + is_bitfield: false, + is_global: false, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().newtype_global_enums, + item, + ) { + EnumVariation::NewType { + is_bitfield: false, + is_global: true, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().rustified_enums, + item, + ) { + EnumVariation::Rust { + non_exhaustive: false, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().rustified_non_exhaustive_enums, + item, + ) { + EnumVariation::Rust { + non_exhaustive: true, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().constified_enums, + item, + ) { + EnumVariation::Consts + } else { + ctx.options().default_enum_style + } + } +} + +/// A single enum variant, to be contained only in an enum. +#[derive(Debug)] +pub(crate) struct EnumVariant { + /// The name of the variant. + name: String, + + /// The original name of the variant (without user mangling) + name_for_allowlisting: String, + + /// An optional doc comment. + comment: Option, + + /// The integer value of the variant. + val: EnumVariantValue, + + /// The custom behavior this variant may have, if any. + custom_behavior: Option, +} + +/// A constant value assigned to an enumeration variant. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum EnumVariantValue { + /// A boolean constant. + Boolean(bool), + + /// A signed constant. + Signed(i64), + + /// An unsigned constant. + Unsigned(u64), +} + +impl EnumVariant { + /// Construct a new enumeration variant from the given parts. + pub(crate) fn new( + name: String, + name_for_allowlisting: String, + comment: Option, + val: EnumVariantValue, + custom_behavior: Option, + ) -> Self { + EnumVariant { + name, + name_for_allowlisting, + comment, + val, + custom_behavior, + } + } + + /// Get this variant's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this variant's name. + pub(crate) fn name_for_allowlisting(&self) -> &str { + &self.name_for_allowlisting + } + + /// Get this variant's value. + pub(crate) fn val(&self) -> EnumVariantValue { + self.val + } + + /// Get this variant's documentation. + pub(crate) fn comment(&self) -> Option<&str> { + self.comment.as_deref() + } + + /// Returns whether this variant should be enforced to be a constant by code + /// generation. + pub(crate) fn force_constification(&self) -> bool { + self.custom_behavior + .map_or(false, |b| b == EnumVariantCustomBehavior::Constify) + } + + /// Returns whether the current variant should be hidden completely from the + /// resulting rust enum. + pub(crate) fn hidden(&self) -> bool { + self.custom_behavior + .map_or(false, |b| b == EnumVariantCustomBehavior::Hide) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/function.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/function.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/function.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/function.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,787 @@ +//! Intermediate representation for C/C++ functions and methods. 
+ +use super::comp::MethodKind; +use super::context::{BindgenContext, TypeId}; +use super::dot::DotAttributes; +use super::item::Item; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::TypeKind; +use crate::callbacks::{ItemInfo, ItemKind}; +use crate::clang::{self, Attribute}; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; +use clang_sys::{self, CXCallingConv}; + +use quote::TokenStreamExt; +use std::io; +use std::str::FromStr; + +const RUST_DERIVE_FUNPTR_LIMIT: usize = 12; + +/// What kind of a function are we looking at? +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum FunctionKind { + /// A plain, free function. + Function, + /// A method of some kind. + Method(MethodKind), +} + +impl FunctionKind { + /// Given a clang cursor, return the kind of function it represents, or + /// `None` otherwise. + pub(crate) fn from_cursor(cursor: &clang::Cursor) -> Option { + // FIXME(emilio): Deduplicate logic with `ir::comp`. + Some(match cursor.kind() { + clang_sys::CXCursor_FunctionDecl => FunctionKind::Function, + clang_sys::CXCursor_Constructor => { + FunctionKind::Method(MethodKind::Constructor) + } + clang_sys::CXCursor_Destructor => { + FunctionKind::Method(if cursor.method_is_virtual() { + MethodKind::VirtualDestructor { + pure_virtual: cursor.method_is_pure_virtual(), + } + } else { + MethodKind::Destructor + }) + } + clang_sys::CXCursor_CXXMethod => { + if cursor.method_is_virtual() { + FunctionKind::Method(MethodKind::Virtual { + pure_virtual: cursor.method_is_pure_virtual(), + }) + } else if cursor.method_is_static() { + FunctionKind::Method(MethodKind::Static) + } else { + FunctionKind::Method(MethodKind::Normal) + } + } + _ => return None, + }) + } +} + +/// The style of linkage +#[derive(Debug, Clone, Copy)] +pub(crate) enum Linkage { + /// Externally visible and can be linked against + External, + /// Not exposed externally. 
'static inline' functions will have this kind of linkage + Internal, +} + +/// A function declaration, with a signature, arguments, and argument names. +/// +/// The argument names vector must be the same length as the ones in the +/// signature. +#[derive(Debug)] +pub(crate) struct Function { + /// The name of this function. + name: String, + + /// The mangled name, that is, the symbol. + mangled_name: Option, + + /// The link name. If specified, overwrite mangled_name. + link_name: Option, + + /// The ID pointing to the current function signature. + signature: TypeId, + + /// The kind of function this is. + kind: FunctionKind, + + /// The linkage of the function. + linkage: Linkage, +} + +impl Function { + /// Construct a new function. + pub(crate) fn new( + name: String, + mangled_name: Option, + link_name: Option, + signature: TypeId, + kind: FunctionKind, + linkage: Linkage, + ) -> Self { + Function { + name, + mangled_name, + link_name, + signature, + kind, + linkage, + } + } + + /// Get this function's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this function's name. + pub(crate) fn mangled_name(&self) -> Option<&str> { + self.mangled_name.as_deref() + } + + /// Get this function's link name. + pub fn link_name(&self) -> Option<&str> { + self.link_name.as_deref() + } + + /// Get this function's signature type. + pub(crate) fn signature(&self) -> TypeId { + self.signature + } + + /// Get this function's kind. + pub(crate) fn kind(&self) -> FunctionKind { + self.kind + } + + /// Get this function's linkage. 
+ pub(crate) fn linkage(&self) -> Linkage { + self.linkage + } +} + +impl DotAttributes for Function { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if let Some(ref mangled) = self.mangled_name { + let mangled: String = + mangled.chars().flat_map(|c| c.escape_default()).collect(); + writeln!( + out, + "mangled name{}", + mangled + )?; + } + + Ok(()) + } +} + +/// A valid rust ABI. +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub enum Abi { + /// The default C ABI. + C, + /// The "stdcall" ABI. + Stdcall, + /// The "efiapi" ABI. + EfiApi, + /// The "fastcall" ABI. + Fastcall, + /// The "thiscall" ABI. + ThisCall, + /// The "vectorcall" ABI. + Vectorcall, + /// The "aapcs" ABI. + Aapcs, + /// The "win64" ABI. + Win64, + /// The "C-unwind" ABI. + CUnwind, +} + +impl FromStr for Abi { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "C" => Ok(Self::C), + "stdcall" => Ok(Self::Stdcall), + "efiapi" => Ok(Self::EfiApi), + "fastcall" => Ok(Self::Fastcall), + "thiscall" => Ok(Self::ThisCall), + "vectorcall" => Ok(Self::Vectorcall), + "aapcs" => Ok(Self::Aapcs), + "win64" => Ok(Self::Win64), + "C-unwind" => Ok(Self::CUnwind), + _ => Err(format!("Invalid or unknown ABI {:?}", s)), + } + } +} + +impl std::fmt::Display for Abi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match *self { + Self::C => "C", + Self::Stdcall => "stdcall", + Self::EfiApi => "efiapi", + Self::Fastcall => "fastcall", + Self::ThisCall => "thiscall", + Self::Vectorcall => "vectorcall", + Self::Aapcs => "aapcs", + Self::Win64 => "win64", + Self::CUnwind => "C-unwind", + }; + + s.fmt(f) + } +} + +impl quote::ToTokens for Abi { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + let abi = self.to_string(); + tokens.append_all(quote! { #abi }); + } +} + +/// An ABI extracted from a clang cursor. 
+#[derive(Debug, Copy, Clone)] +pub(crate) enum ClangAbi { + /// An ABI known by Rust. + Known(Abi), + /// An unknown or invalid ABI. + Unknown(CXCallingConv), +} + +impl ClangAbi { + /// Returns whether this Abi is known or not. + fn is_unknown(&self) -> bool { + matches!(*self, ClangAbi::Unknown(..)) + } +} + +impl quote::ToTokens for ClangAbi { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + match *self { + Self::Known(abi) => abi.to_tokens(tokens), + Self::Unknown(cc) => panic!( + "Cannot turn unknown calling convention to tokens: {:?}", + cc + ), + } + } +} + +/// A function signature. +#[derive(Debug)] +pub(crate) struct FunctionSig { + /// The name of this function signature. + name: String, + + /// The return type of the function. + return_type: TypeId, + + /// The type of the arguments, optionally with the name of the argument when + /// declared. + argument_types: Vec<(Option, TypeId)>, + + /// Whether this function is variadic. + is_variadic: bool, + is_divergent: bool, + + /// Whether this function's return value must be used. + must_use: bool, + + /// The ABI of this function. + abi: ClangAbi, +} + +fn get_abi(cc: CXCallingConv) -> ClangAbi { + use clang_sys::*; + match cc { + CXCallingConv_Default => ClangAbi::Known(Abi::C), + CXCallingConv_C => ClangAbi::Known(Abi::C), + CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall), + CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall), + CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall), + CXCallingConv_X86VectorCall => ClangAbi::Known(Abi::Vectorcall), + CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs), + CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64), + other => ClangAbi::Unknown(other), + } +} + +/// Get the mangled name for the cursor's referent. 
+pub(crate) fn cursor_mangling( + ctx: &BindgenContext, + cursor: &clang::Cursor, +) -> Option { + if !ctx.options().enable_mangling { + return None; + } + + // We early return here because libclang may crash in some case + // if we pass in a variable inside a partial specialized template. + // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462. + if cursor.is_in_non_fully_specialized_template() { + return None; + } + + let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor; + if let Ok(mut manglings) = cursor.cxx_manglings() { + while let Some(m) = manglings.pop() { + // Only generate the destructor group 1, see below. + if is_destructor && !m.ends_with("D1Ev") { + continue; + } + + return Some(m); + } + } + + let mut mangling = cursor.mangling(); + if mangling.is_empty() { + return None; + } + + if is_destructor { + // With old (3.8-) libclang versions, and the Itanium ABI, clang returns + // the "destructor group 0" symbol, which means that it'll try to free + // memory, which definitely isn't what we want. + // + // Explicitly force the destructor group 1 symbol. + // + // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special + // for the reference, and http://stackoverflow.com/a/6614369/1091587 for + // a more friendly explanation. + // + // We don't need to do this for constructors since clang seems to always + // have returned the C1 constructor. + // + // FIXME(emilio): Can a legit symbol in other ABIs end with this string? + // I don't think so, but if it can this would become a linker error + // anyway, not an invalid free at runtime. + // + // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with + // time. 
+ if mangling.ends_with("D0Ev") { + let new_len = mangling.len() - 4; + mangling.truncate(new_len); + mangling.push_str("D1Ev"); + } + } + + Some(mangling) +} + +fn args_from_ty_and_cursor( + ty: &clang::Type, + cursor: &clang::Cursor, + ctx: &mut BindgenContext, +) -> Vec<(Option, TypeId)> { + let cursor_args = cursor.args().unwrap_or_default().into_iter(); + let type_args = ty.args().unwrap_or_default().into_iter(); + + // Argument types can be found in either the cursor or the type, but argument names may only be + // found on the cursor. We often have access to both a type and a cursor for each argument, but + // in some cases we may only have one. + // + // Prefer using the type as the source of truth for the argument's type, but fall back to + // inspecting the cursor (this happens for Objective C interfaces). + // + // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor + // (this happens for function pointer return types). + cursor_args + .map(Some) + .chain(std::iter::repeat(None)) + .zip(type_args.map(Some).chain(std::iter::repeat(None))) + .take_while(|(cur, ty)| cur.is_some() || ty.is_some()) + .map(|(arg_cur, arg_ty)| { + let name = arg_cur.map(|a| a.spelling()).and_then(|name| { + if name.is_empty() { + None + } else { + Some(name) + } + }); + + let cursor = arg_cur.unwrap_or(*cursor); + let ty = arg_ty.unwrap_or_else(|| cursor.cur_type()); + (name, Item::from_ty_or_ref(ty, cursor, None, ctx)) + }) + .collect() +} + +impl FunctionSig { + /// Construct a new function signature from the given Clang type. 
+ pub(crate) fn from_ty( + ty: &clang::Type, + cursor: &clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + debug!("FunctionSig::from_ty {:?} {:?}", ty, cursor); + + // Skip function templates + let kind = cursor.kind(); + if kind == CXCursor_FunctionTemplate { + return Err(ParseError::Continue); + } + + let spelling = cursor.spelling(); + + // Don't parse operatorxx functions in C++ + let is_operator = |spelling: &str| { + spelling.starts_with("operator") && + !clang::is_valid_identifier(spelling) + }; + if is_operator(&spelling) { + return Err(ParseError::Continue); + } + + // Constructors of non-type template parameter classes for some reason + // include the template parameter in their name. Just skip them, since + // we don't handle well non-type template parameters anyway. + if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) && + spelling.contains('<') + { + return Err(ParseError::Continue); + } + + let cursor = if cursor.is_valid() { + *cursor + } else { + ty.declaration() + }; + + let mut args = match kind { + CXCursor_FunctionDecl | + CXCursor_Constructor | + CXCursor_CXXMethod | + CXCursor_ObjCInstanceMethodDecl | + CXCursor_ObjCClassMethodDecl => { + args_from_ty_and_cursor(ty, &cursor, ctx) + } + _ => { + // For non-CXCursor_FunctionDecl, visiting the cursor's children + // is the only reliable way to get parameter names. + let mut args = vec![]; + cursor.visit(|c| { + if c.kind() == CXCursor_ParmDecl { + let ty = + Item::from_ty_or_ref(c.cur_type(), c, None, ctx); + let name = c.spelling(); + let name = + if name.is_empty() { None } else { Some(name) }; + args.push((name, ty)); + } + CXChildVisit_Continue + }); + + if args.is_empty() { + // FIXME(emilio): Sometimes libclang doesn't expose the + // right AST for functions tagged as stdcall and such... 
+ // + // https://bugs.llvm.org/show_bug.cgi?id=45919 + args_from_ty_and_cursor(ty, &cursor, ctx) + } else { + args + } + } + }; + + let (must_use, mut is_divergent) = + if ctx.options().enable_function_attribute_detection { + let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[ + Attribute::MUST_USE, + Attribute::NO_RETURN, + Attribute::NO_RETURN_CPP, + ]); + (must_use, no_return || no_return_cpp) + } else { + Default::default() + }; + + // This looks easy to break but the clang parser keeps the type spelling clean even if + // other attributes are added. + is_divergent = + is_divergent || ty.spelling().contains("__attribute__((noreturn))"); + + let is_method = kind == CXCursor_CXXMethod; + let is_constructor = kind == CXCursor_Constructor; + let is_destructor = kind == CXCursor_Destructor; + if (is_constructor || is_destructor || is_method) && + cursor.lexical_parent() != cursor.semantic_parent() + { + // Only parse constructors once. + return Err(ParseError::Continue); + } + + if is_method || is_constructor || is_destructor { + let is_const = is_method && cursor.method_is_const(); + let is_virtual = is_method && cursor.method_is_virtual(); + let is_static = is_method && cursor.method_is_static(); + if !is_static && !is_virtual { + let parent = cursor.semantic_parent(); + let class = Item::parse(parent, None, ctx) + .expect("Expected to parse the class"); + // The `class` most likely is not finished parsing yet, so use + // the unchecked variant. 
+ let class = class.as_type_id_unchecked(); + + let class = if is_const { + let const_class_id = ctx.next_item_id(); + ctx.build_const_wrapper( + const_class_id, + class, + None, + &parent.cur_type(), + ) + } else { + class + }; + + let ptr = + Item::builtin_type(TypeKind::Pointer(class), false, ctx); + args.insert(0, (Some("this".into()), ptr)); + } else if is_virtual { + let void = Item::builtin_type(TypeKind::Void, false, ctx); + let ptr = + Item::builtin_type(TypeKind::Pointer(void), false, ctx); + args.insert(0, (Some("this".into()), ptr)); + } + } + + let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl || + kind == CXCursor_ObjCClassMethodDecl + { + ty.ret_type() + .or_else(|| cursor.ret_type()) + .ok_or(ParseError::Continue)? + } else { + ty.ret_type().ok_or(ParseError::Continue)? + }; + + let ret = if is_constructor && ctx.is_target_wasm32() { + // Constructors in Clang wasm32 target return a pointer to the object + // being constructed. + let void = Item::builtin_type(TypeKind::Void, false, ctx); + Item::builtin_type(TypeKind::Pointer(void), false, ctx) + } else { + Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx) + }; + + // Clang plays with us at "find the calling convention", see #549 and + // co. This seems to be a better fix than that commit. + let mut call_conv = ty.call_conv(); + if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() { + let cursor_call_conv = ty.call_conv(); + if cursor_call_conv != CXCallingConv_Invalid { + call_conv = cursor_call_conv; + } + } + + let abi = get_abi(call_conv); + + if abi.is_unknown() { + warn!("Unknown calling convention: {:?}", call_conv); + } + + Ok(Self { + name: spelling, + return_type: ret, + argument_types: args, + is_variadic: ty.is_variadic(), + is_divergent, + must_use, + abi, + }) + } + + /// Get this function signature's return type. + pub(crate) fn return_type(&self) -> TypeId { + self.return_type + } + + /// Get this function signature's argument (name, type) pairs. 
+ pub(crate) fn argument_types(&self) -> &[(Option, TypeId)] { + &self.argument_types + } + + /// Get this function signature's ABI. + pub(crate) fn abi( + &self, + ctx: &BindgenContext, + name: Option<&str>, + ) -> ClangAbi { + // FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx` + // instead?. + if let Some(name) = name { + if let Some((abi, _)) = ctx + .options() + .abi_overrides + .iter() + .find(|(_, regex_set)| regex_set.matches(name)) + { + ClangAbi::Known(*abi) + } else { + self.abi + } + } else if let Some((abi, _)) = ctx + .options() + .abi_overrides + .iter() + .find(|(_, regex_set)| regex_set.matches(&self.name)) + { + ClangAbi::Known(*abi) + } else { + self.abi + } + } + + /// Is this function signature variadic? + pub(crate) fn is_variadic(&self) -> bool { + // Clang reports some functions as variadic when they *might* be + // variadic. We do the argument check because rust doesn't codegen well + // variadic functions without an initial argument. + self.is_variadic && !self.argument_types.is_empty() + } + + /// Must this function's return value be used? + pub(crate) fn must_use(&self) -> bool { + self.must_use + } + + /// Are function pointers with this signature able to derive Rust traits? + /// Rust only supports deriving traits for function pointers with a limited + /// number of parameters and a couple ABIs. + /// + /// For more details, see: + /// + /// * , + /// * , + /// * and + pub(crate) fn function_pointers_can_derive(&self) -> bool { + if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT { + return false; + } + + matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..)) + } + + /// Whether this function has attributes marking it as divergent. 
+ pub(crate) fn is_divergent(&self) -> bool { + self.is_divergent + } +} + +impl ClangSubItemParser for Function { + fn parse( + cursor: clang::Cursor, + context: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + + let kind = match FunctionKind::from_cursor(&cursor) { + None => return Err(ParseError::Continue), + Some(k) => k, + }; + + debug!("Function::parse({:?}, {:?})", cursor, cursor.cur_type()); + let visibility = cursor.visibility(); + if visibility != CXVisibility_Default { + return Err(ParseError::Continue); + } + + if cursor.access_specifier() == CX_CXXPrivate { + return Err(ParseError::Continue); + } + + let linkage = cursor.linkage(); + let linkage = match linkage { + CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External, + CXLinkage_Internal => Linkage::Internal, + _ => return Err(ParseError::Continue), + }; + + if cursor.is_inlined_function() || + cursor + .definition() + .map_or(false, |x| x.is_inlined_function()) + { + if !context.options().generate_inline_functions && + !context.options().wrap_static_fns + { + return Err(ParseError::Continue); + } + + if cursor.is_deleted_function() { + return Err(ParseError::Continue); + } + + // We cannot handle `inline` functions that are not `static`. + if context.options().wrap_static_fns && + cursor.is_inlined_function() && + matches!(linkage, Linkage::External) + { + return Err(ParseError::Continue); + } + } + + // Grab the signature using Item::from_ty. + let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?; + + let mut name = cursor.spelling(); + assert!(!name.is_empty(), "Empty function name?"); + + if cursor.kind() == CXCursor_Destructor { + // Remove the leading `~`. The alternative to this is special-casing + // code-generation for destructor functions, which seems less than + // ideal. + if name.starts_with('~') { + name.remove(0); + } + + // Add a suffix to avoid colliding with constructors. 
This would be + // technically fine (since we handle duplicated functions/methods), + // but seems easy enough to handle it here. + name.push_str("_destructor"); + } + if let Some(nm) = context.options().last_callback(|callbacks| { + callbacks.generated_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Function, + }) + }) { + name = nm; + } + assert!(!name.is_empty(), "Empty function name."); + + let mangled_name = cursor_mangling(context, &cursor); + + let link_name = context.options().last_callback(|callbacks| { + callbacks.generated_link_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Function, + }) + }); + + let function = Self::new( + name.clone(), + mangled_name, + link_name, + sig, + kind, + linkage, + ); + + Ok(ParseResult::New(function, Some(cursor))) + } +} + +impl Trace for FunctionSig { + type Extra = (); + + fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn); + + for &(_, ty) in self.argument_types() { + tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/int.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/int.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/int.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/int.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,127 @@ +//! Intermediate representation for integral types. + +/// Which integral type are we dealing with? +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum IntKind { + /// A `bool`. + Bool, + + /// A `signed char`. + SChar, + + /// An `unsigned char`. + UChar, + + /// A `wchar_t`. + WChar, + + /// A platform-dependent `char` type, with the signedness support. + Char { + /// Whether the char is signed for the target platform. 
+ is_signed: bool, + }, + + /// A `short`. + Short, + + /// An `unsigned short`. + UShort, + + /// An `int`. + Int, + + /// An `unsigned int`. + UInt, + + /// A `long`. + Long, + + /// An `unsigned long`. + ULong, + + /// A `long long`. + LongLong, + + /// An `unsigned long long`. + ULongLong, + + /// A 8-bit signed integer. + I8, + + /// A 8-bit unsigned integer. + U8, + + /// A 16-bit signed integer. + I16, + + /// Either a `char16_t` or a `wchar_t`. + U16, + + /// A 32-bit signed integer. + I32, + + /// A 32-bit unsigned integer. + U32, + + /// A 64-bit signed integer. + I64, + + /// A 64-bit unsigned integer. + U64, + + /// An `int128_t` + I128, + + /// A `uint128_t`. + U128, + + /// A custom integer type, used to allow custom macro types depending on + /// range. + Custom { + /// The name of the type, which would be used without modification. + name: &'static str, + /// Whether the type is signed or not. + is_signed: bool, + }, +} + +impl IntKind { + /// Is this integral type signed? + pub(crate) fn is_signed(&self) -> bool { + use self::IntKind::*; + match *self { + // TODO(emilio): wchar_t can in theory be signed, but we have no way + // to know whether it is or not right now (unlike char, there's no + // WChar_S / WChar_U). + Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 | + WChar | U32 | U64 | U128 => false, + + SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 | + I128 => true, + + Char { is_signed } => is_signed, + + Custom { is_signed, .. } => is_signed, + } + } + + /// If this type has a known size, return it (in bytes). This is to + /// alleviate libclang sometimes not giving us a layout (like in the case + /// when an enum is defined inside a class with template parameters). + pub(crate) fn known_size(&self) -> Option { + use self::IntKind::*; + Some(match *self { + Bool | UChar | SChar | U8 | I8 | Char { .. 
} => 1, + U16 | I16 => 2, + U32 | I32 => 4, + U64 | I64 => 8, + I128 | U128 => 16, + _ => return None, + }) + } + + /// Whether this type's signedness matches the value. + pub(crate) fn signedness_matches(&self, val: i64) -> bool { + val >= 0 || self.is_signed() + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item_kind.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item_kind.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item_kind.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item_kind.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,135 @@ +//! Different variants of an `Item` in our intermediate representation. + +use super::context::BindgenContext; +use super::dot::DotAttributes; +use super::function::Function; +use super::module::Module; +use super::ty::Type; +use super::var::Var; +use std::io; + +/// A item we parse and translate. +#[derive(Debug)] +pub(crate) enum ItemKind { + /// A module, created implicitly once (the root module), or via C++ + /// namespaces. + Module(Module), + + /// A type declared in any of the multiple ways it can be declared. + Type(Type), + + /// A function or method declaration. + Function(Function), + + /// A variable declaration, most likely a static. + Var(Var), +} + +impl ItemKind { + /// Get a reference to this `ItemKind`'s underying `Module`, or `None` if it + /// is some other kind. + pub(crate) fn as_module(&self) -> Option<&Module> { + match *self { + ItemKind::Module(ref module) => Some(module), + _ => None, + } + } + + /// Transform our `ItemKind` into a string. + pub(crate) fn kind_name(&self) -> &'static str { + match *self { + ItemKind::Module(..) => "Module", + ItemKind::Type(..) => "Type", + ItemKind::Function(..) => "Function", + ItemKind::Var(..) => "Var", + } + } + + /// Is this a module? 
+ pub(crate) fn is_module(&self) -> bool { + self.as_module().is_some() + } + + /// Get a reference to this `ItemKind`'s underying `Function`, or `None` if + /// it is some other kind. + pub(crate) fn as_function(&self) -> Option<&Function> { + match *self { + ItemKind::Function(ref func) => Some(func), + _ => None, + } + } + + /// Is this a function? + pub(crate) fn is_function(&self) -> bool { + self.as_function().is_some() + } + + /// Get a reference to this `ItemKind`'s underying `Function`, or panic if + /// it is some other kind. + pub(crate) fn expect_function(&self) -> &Function { + self.as_function().expect("Not a function") + } + + /// Get a reference to this `ItemKind`'s underying `Type`, or `None` if + /// it is some other kind. + pub(crate) fn as_type(&self) -> Option<&Type> { + match *self { + ItemKind::Type(ref ty) => Some(ty), + _ => None, + } + } + + /// Get a mutable reference to this `ItemKind`'s underying `Type`, or `None` + /// if it is some other kind. + pub(crate) fn as_type_mut(&mut self) -> Option<&mut Type> { + match *self { + ItemKind::Type(ref mut ty) => Some(ty), + _ => None, + } + } + + /// Is this a type? + pub(crate) fn is_type(&self) -> bool { + self.as_type().is_some() + } + + /// Get a reference to this `ItemKind`'s underying `Type`, or panic if it is + /// some other kind. + pub(crate) fn expect_type(&self) -> &Type { + self.as_type().expect("Not a type") + } + + /// Get a reference to this `ItemKind`'s underying `Var`, or `None` if it is + /// some other kind. + pub(crate) fn as_var(&self) -> Option<&Var> { + match *self { + ItemKind::Var(ref v) => Some(v), + _ => None, + } + } + + /// Is this a variable? 
+ pub(crate) fn is_var(&self) -> bool { + self.as_var().is_some() + } +} + +impl DotAttributes for ItemKind { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "kind{}", self.kind_name())?; + + match *self { + ItemKind::Module(ref module) => module.dot_attributes(ctx, out), + ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out), + ItemKind::Function(ref func) => func.dot_attributes(ctx, out), + ItemKind::Var(ref var) => var.dot_attributes(ctx, out), + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/item.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,2026 @@ +//! Bindgen's core intermediate representation type. + +use super::super::codegen::{EnumVariation, CONSTIFIED_ENUM_MODULE_REPR_NAME}; +use super::analysis::{HasVtable, HasVtableResult, Sizedness, SizednessResult}; +use super::annotations::Annotations; +use super::comp::{CompKind, MethodKind}; +use super::context::{BindgenContext, ItemId, PartialType, TypeId}; +use super::derive::{ + CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use super::dot::DotAttributes; +use super::function::{Function, FunctionKind}; +use super::item_kind::ItemKind; +use super::layout::Opaque; +use super::module::Module; +use super::template::{AsTemplateParam, TemplateParameters}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::{Type, TypeKind}; +use crate::clang; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; + +use lazycell::LazyCell; + +use std::cell::Cell; +use std::collections::BTreeSet; +use std::fmt::Write; +use std::io; +use std::iter; + +/// A trait 
to get the canonical name from an item. +/// +/// This is the trait that will eventually isolate all the logic related to name +/// mangling and that kind of stuff. +/// +/// This assumes no nested paths, at some point I'll have to make it a more +/// complex thing. +/// +/// This name is required to be safe for Rust, that is, is not expected to +/// return any rust keyword from here. +pub(crate) trait ItemCanonicalName { + /// Get the canonical name for this item. + fn canonical_name(&self, ctx: &BindgenContext) -> String; +} + +/// The same, but specifies the path that needs to be followed to reach an item. +/// +/// To contrast with canonical_name, here's an example: +/// +/// ```c++ +/// namespace foo { +/// const BAR = 3; +/// } +/// ``` +/// +/// For bar, the canonical path is `vec!["foo", "BAR"]`, while the canonical +/// name is just `"BAR"`. +pub(crate) trait ItemCanonicalPath { + /// Get the namespace-aware canonical path for this item. This means that if + /// namespaces are disabled, you'll get a single item, and otherwise you get + /// the whole path. + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec; + + /// Get the canonical path for this item. + fn canonical_path(&self, ctx: &BindgenContext) -> Vec; +} + +/// A trait for determining if some IR thing is opaque or not. +pub(crate) trait IsOpaque { + /// Extra context the IR thing needs to determine if it is opaque or not. + type Extra; + + /// Returns `true` if the thing is opaque, and `false` otherwise. + /// + /// May only be called when `ctx` is in the codegen phase. + fn is_opaque(&self, ctx: &BindgenContext, extra: &Self::Extra) -> bool; +} + +/// A trait for determining if some IR thing has type parameter in array or not. +pub(crate) trait HasTypeParamInArray { + /// Returns `true` if the thing has Array, and `false` otherwise. + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait for determining if some IR thing has float or not. 
+pub(crate) trait HasFloat { + /// Returns `true` if the thing has float, and `false` otherwise. + fn has_float(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait for iterating over an item and its parents and up its ancestor chain +/// up to (but not including) the implicit root module. +pub(crate) trait ItemAncestors { + /// Get an iterable over this item's ancestors. + fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a>; +} + +#[cfg(testing_only_extra_assertions)] +type DebugOnlyItemSet = ItemSet; + +#[cfg(not(testing_only_extra_assertions))] +struct DebugOnlyItemSet; + +#[cfg(not(testing_only_extra_assertions))] +impl DebugOnlyItemSet { + fn new() -> Self { + DebugOnlyItemSet + } + + fn contains(&self, _id: &ItemId) -> bool { + false + } + + fn insert(&mut self, _id: ItemId) {} +} + +/// An iterator over an item and its ancestors. +pub(crate) struct ItemAncestorsIter<'a> { + item: ItemId, + ctx: &'a BindgenContext, + seen: DebugOnlyItemSet, +} + +impl<'a> ItemAncestorsIter<'a> { + fn new>(ctx: &'a BindgenContext, id: Id) -> Self { + ItemAncestorsIter { + item: id.into(), + ctx, + seen: DebugOnlyItemSet::new(), + } + } +} + +impl<'a> Iterator for ItemAncestorsIter<'a> { + type Item = ItemId; + + fn next(&mut self) -> Option { + let item = self.ctx.resolve_item(self.item); + + if item.parent_id() == self.item { + None + } else { + self.item = item.parent_id(); + + extra_assert!(!self.seen.contains(&item.id())); + self.seen.insert(item.id()); + + Some(item.id()) + } + } +} + +impl AsTemplateParam for T +where + T: Copy + Into, +{ + type Extra = (); + + fn as_template_param( + &self, + ctx: &BindgenContext, + _: &(), + ) -> Option { + ctx.resolve_item((*self).into()).as_template_param(ctx, &()) + } +} + +impl AsTemplateParam for Item { + type Extra = (); + + fn as_template_param( + &self, + ctx: &BindgenContext, + _: &(), + ) -> Option { + self.kind.as_template_param(ctx, self) + } +} + +impl AsTemplateParam for ItemKind { + type Extra = 
Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + match *self { + ItemKind::Type(ref ty) => ty.as_template_param(ctx, item), + ItemKind::Module(..) | + ItemKind::Function(..) | + ItemKind::Var(..) => None, + } + } +} + +impl ItemCanonicalName for T +where + T: Copy + Into, +{ + fn canonical_name(&self, ctx: &BindgenContext) -> String { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).canonical_name(ctx) + } +} + +impl ItemCanonicalPath for T +where + T: Copy + Into, +{ + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).namespace_aware_canonical_path(ctx) + } + + fn canonical_path(&self, ctx: &BindgenContext) -> Vec { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).canonical_path(ctx) + } +} + +impl ItemAncestors for T +where + T: Copy + Into, +{ + fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { + ItemAncestorsIter::new(ctx, *self) + } +} + +impl ItemAncestors for Item { + fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { + self.id().ancestors(ctx) + } +} + +impl Trace for Id +where + Id: Copy + Into, +{ + type Extra = (); + + fn trace(&self, ctx: &BindgenContext, tracer: &mut T, extra: &()) + where + T: Tracer, + { + ctx.resolve_item(*self).trace(ctx, tracer, extra); + } +} + +impl Trace for Item { + type Extra = (); + + fn trace(&self, ctx: &BindgenContext, tracer: &mut T, _extra: &()) + where + T: Tracer, + { + // Even if this item is blocklisted/hidden, we want to trace it. It is + // traversal iterators' consumers' responsibility to filter items as + // needed. Generally, this filtering happens in the implementation of + // `Iterator` for `allowlistedItems`. 
Fully tracing blocklisted items is + // necessary for things like the template parameter usage analysis to + // function correctly. + + match *self.kind() { + ItemKind::Type(ref ty) => { + // There are some types, like resolved type references, where we + // don't want to stop collecting types even though they may be + // opaque. + if ty.should_be_traced_unconditionally() || + !self.is_opaque(ctx, &()) + { + ty.trace(ctx, tracer, self); + } + } + ItemKind::Function(ref fun) => { + // Just the same way, it has not real meaning for a function to + // be opaque, so we trace across it. + tracer.visit(fun.signature().into()); + } + ItemKind::Var(ref var) => { + tracer.visit_kind(var.ty().into(), EdgeKind::VarType); + } + ItemKind::Module(_) => { + // Module -> children edges are "weak", and we do not want to + // trace them. If we did, then allowlisting wouldn't work as + // expected: everything in every module would end up + // allowlisted. + // + // TODO: make a new edge kind for module -> children edges and + // filter them during allowlisting traversals. 
+ } + } + } +} + +impl CanDeriveDebug for Item { + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_debug(ctx) + } +} + +impl CanDeriveDefault for Item { + fn can_derive_default(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_default(ctx) + } +} + +impl CanDeriveCopy for Item { + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_copy(ctx) + } +} + +impl CanDeriveHash for Item { + fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_hash(ctx) + } +} + +impl CanDerivePartialOrd for Item { + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_partialord(ctx) + } +} + +impl CanDerivePartialEq for Item { + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_partialeq(ctx) + } +} + +impl CanDeriveEq for Item { + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_eq(ctx) + } +} + +impl CanDeriveOrd for Item { + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_ord(ctx) + } +} + +/// An item is the base of the bindgen representation, it can be either a +/// module, a type, a function, or a variable (see `ItemKind` for more +/// information). +/// +/// Items refer to each other by `ItemId`. Every item has its parent's +/// ID. Depending on the kind of item this is, it may also refer to other items, +/// such as a compound type item referring to other types. Collectively, these +/// references form a graph. +/// +/// The entry-point to this graph is the "root module": a meta-item used to hold +/// all top-level items. +/// +/// An item may have a comment, and annotations (see the `annotations` module). +/// +/// Note that even though we parse all the types of annotations in comments, not +/// all of them apply to every item. Those rules are described in the +/// `annotations` module. 
+#[derive(Debug)] +pub(crate) struct Item { + /// This item's ID. + id: ItemId, + + /// The item's local ID, unique only amongst its siblings. Only used for + /// anonymous items. + /// + /// Lazily initialized in local_id(). + /// + /// Note that only structs, unions, and enums get a local type ID. In any + /// case this is an implementation detail. + local_id: LazyCell, + + /// The next local ID to use for a child or template instantiation. + next_child_local_id: Cell, + + /// A cached copy of the canonical name, as returned by `canonical_name`. + /// + /// This is a fairly used operation during codegen so this makes bindgen + /// considerably faster in those cases. + canonical_name: LazyCell, + + /// The path to use for allowlisting and other name-based checks, as + /// returned by `path_for_allowlisting`, lazily constructed. + path_for_allowlisting: LazyCell>, + + /// A doc comment over the item, if any. + comment: Option, + /// Annotations extracted from the doc comment, or the default ones + /// otherwise. + annotations: Annotations, + /// An item's parent ID. This will most likely be a class where this item + /// was declared, or a module, etc. + /// + /// All the items have a parent, except the root module, in which case the + /// parent ID is its own ID. + parent_id: ItemId, + /// The item kind. + kind: ItemKind, + /// The source location of the item. + location: Option, +} + +impl AsRef for Item { + fn as_ref(&self) -> &ItemId { + &self.id + } +} + +impl Item { + /// Construct a new `Item`. 
+ pub(crate) fn new( + id: ItemId, + comment: Option, + annotations: Option, + parent_id: ItemId, + kind: ItemKind, + location: Option, + ) -> Self { + debug_assert!(id != parent_id || kind.is_module()); + Item { + id, + local_id: LazyCell::new(), + next_child_local_id: Cell::new(1), + canonical_name: LazyCell::new(), + path_for_allowlisting: LazyCell::new(), + parent_id, + comment, + annotations: annotations.unwrap_or_default(), + kind, + location, + } + } + + /// Construct a new opaque item type. + pub(crate) fn new_opaque_type( + with_id: ItemId, + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> TypeId { + let location = ty.declaration().location(); + let ty = Opaque::from_clang_ty(ty, ctx); + let kind = ItemKind::Type(ty); + let parent = ctx.root_module().into(); + ctx.add_item( + Item::new(with_id, None, None, parent, kind, Some(location)), + None, + None, + ); + with_id.as_type_id_unchecked() + } + + /// Get this `Item`'s identifier. + pub(crate) fn id(&self) -> ItemId { + self.id + } + + /// Get this `Item`'s parent's identifier. + /// + /// For the root module, the parent's ID is its own ID. + pub(crate) fn parent_id(&self) -> ItemId { + self.parent_id + } + + /// Set this item's parent ID. + /// + /// This is only used so replacements get generated in the proper module. + pub(crate) fn set_parent_for_replacement>( + &mut self, + id: Id, + ) { + self.parent_id = id.into(); + } + + /// Returns the depth this item is indented to. + /// + /// FIXME(emilio): This may need fixes for the enums within modules stuff. + pub(crate) fn codegen_depth(&self, ctx: &BindgenContext) -> usize { + if !ctx.options().enable_cxx_namespaces { + return 0; + } + + self.ancestors(ctx) + .filter(|id| { + ctx.resolve_item(*id).as_module().map_or(false, |module| { + !module.is_inline() || + ctx.options().conservative_inline_namespaces + }) + }) + .count() + + 1 + } + + /// Get this `Item`'s comment, if it has any, already preprocessed and with + /// the right indentation. 
+ pub(crate) fn comment(&self, ctx: &BindgenContext) -> Option { + if !ctx.options().generate_comments { + return None; + } + + self.comment + .as_ref() + .map(|comment| ctx.options().process_comment(comment)) + } + + /// What kind of item is this? + pub(crate) fn kind(&self) -> &ItemKind { + &self.kind + } + + /// Get a mutable reference to this item's kind. + pub(crate) fn kind_mut(&mut self) -> &mut ItemKind { + &mut self.kind + } + + /// Where in the source is this item located? + pub(crate) fn location(&self) -> Option<&clang::SourceLocation> { + self.location.as_ref() + } + + /// Get an identifier that differentiates this item from its siblings. + /// + /// This should stay relatively stable in the face of code motion outside or + /// below this item's lexical scope, meaning that this can be useful for + /// generating relatively stable identifiers within a scope. + pub(crate) fn local_id(&self, ctx: &BindgenContext) -> usize { + *self.local_id.borrow_with(|| { + let parent = ctx.resolve_item(self.parent_id); + parent.next_child_local_id() + }) + } + + /// Get an identifier that differentiates a child of this item of other + /// related items. + /// + /// This is currently used for anonymous items, and template instantiation + /// tests, in both cases in order to reduce noise when system headers are at + /// place. + pub(crate) fn next_child_local_id(&self) -> usize { + let local_id = self.next_child_local_id.get(); + self.next_child_local_id.set(local_id + 1); + local_id + } + + /// Returns whether this item is a top-level item, from the point of view of + /// bindgen. + /// + /// This point of view changes depending on whether namespaces are enabled + /// or not. That way, in the following example: + /// + /// ```c++ + /// namespace foo { + /// static int var; + /// } + /// ``` + /// + /// `var` would be a toplevel item if namespaces are disabled, but won't if + /// they aren't. 
+ /// + /// This function is used to determine when the codegen phase should call + /// `codegen` on an item, since any item that is not top-level will be + /// generated by its parent. + pub(crate) fn is_toplevel(&self, ctx: &BindgenContext) -> bool { + // FIXME: Workaround for some types falling behind when parsing weird + // stl classes, for example. + if ctx.options().enable_cxx_namespaces && + self.kind().is_module() && + self.id() != ctx.root_module() + { + return false; + } + + let mut parent = self.parent_id; + loop { + let parent_item = match ctx.resolve_item_fallible(parent) { + Some(item) => item, + None => return false, + }; + + if parent_item.id() == ctx.root_module() { + return true; + } else if ctx.options().enable_cxx_namespaces || + !parent_item.kind().is_module() + { + return false; + } + + parent = parent_item.parent_id(); + } + } + + /// Get a reference to this item's underlying `Type`. Panic if this is some + /// other kind of item. + pub(crate) fn expect_type(&self) -> &Type { + self.kind().expect_type() + } + + /// Get a reference to this item's underlying `Type`, or `None` if this is + /// some other kind of item. + pub(crate) fn as_type(&self) -> Option<&Type> { + self.kind().as_type() + } + + /// Get a reference to this item's underlying `Function`. Panic if this is + /// some other kind of item. + pub(crate) fn expect_function(&self) -> &Function { + self.kind().expect_function() + } + + /// Is this item a module? + pub(crate) fn is_module(&self) -> bool { + matches!(self.kind, ItemKind::Module(..)) + } + + /// Get this item's annotations. + pub(crate) fn annotations(&self) -> &Annotations { + &self.annotations + } + + /// Whether this item should be blocklisted. + /// + /// This may be due to either annotations or to other kind of configuration. 
+ pub(crate) fn is_blocklisted(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + if self.annotations.hide() { + return true; + } + + if !ctx.options().blocklisted_files.is_empty() { + if let Some(location) = &self.location { + let (file, _, _, _) = location.location(); + if let Some(filename) = file.name() { + if ctx.options().blocklisted_files.matches(filename) { + return true; + } + } + } + } + + let path = self.path_for_allowlisting(ctx); + let name = path[1..].join("::"); + ctx.options().blocklisted_items.matches(&name) || + match self.kind { + ItemKind::Type(..) => { + ctx.options().blocklisted_types.matches(&name) || + ctx.is_replaced_type(path, self.id) + } + ItemKind::Function(..) => { + ctx.options().blocklisted_functions.matches(&name) + } + // TODO: Add constant / namespace blocklisting? + ItemKind::Var(..) | ItemKind::Module(..) => false, + } + } + + /// Take out item NameOptions + pub(crate) fn name<'a>( + &'a self, + ctx: &'a BindgenContext, + ) -> NameOptions<'a> { + NameOptions::new(self, ctx) + } + + /// Get the target item ID for name generation. 
+ fn name_target(&self, ctx: &BindgenContext) -> ItemId { + let mut targets_seen = DebugOnlyItemSet::new(); + let mut item = self; + + loop { + extra_assert!(!targets_seen.contains(&item.id())); + targets_seen.insert(item.id()); + + if self.annotations().use_instead_of().is_some() { + return self.id(); + } + + match *item.kind() { + ItemKind::Type(ref ty) => match *ty.kind() { + TypeKind::ResolvedTypeRef(inner) => { + item = ctx.resolve_item(inner); + } + TypeKind::TemplateInstantiation(ref inst) => { + item = ctx.resolve_item(inst.template_definition()); + } + _ => return item.id(), + }, + _ => return item.id(), + } + } + } + + /// Create a fully disambiguated name for an item, including template + /// parameters if it is a type + pub(crate) fn full_disambiguated_name( + &self, + ctx: &BindgenContext, + ) -> String { + let mut s = String::new(); + let level = 0; + self.push_disambiguated_name(ctx, &mut s, level); + s + } + + /// Helper function for full_disambiguated_name + fn push_disambiguated_name( + &self, + ctx: &BindgenContext, + to: &mut String, + level: u8, + ) { + to.push_str(&self.canonical_name(ctx)); + if let ItemKind::Type(ref ty) = *self.kind() { + if let TypeKind::TemplateInstantiation(ref inst) = *ty.kind() { + to.push_str(&format!("_open{}_", level)); + for arg in inst.template_arguments() { + arg.into_resolver() + .through_type_refs() + .resolve(ctx) + .push_disambiguated_name(ctx, to, level + 1); + to.push('_'); + } + to.push_str(&format!("close{}", level)); + } + } + } + + /// Get this function item's name, or `None` if this item is not a function. + fn func_name(&self) -> Option<&str> { + match *self.kind() { + ItemKind::Function(ref func) => Some(func.name()), + _ => None, + } + } + + /// Get the overload index for this method. If this is not a method, return + /// `None`. 
+ fn overload_index(&self, ctx: &BindgenContext) -> Option { + self.func_name().and_then(|func_name| { + let parent = ctx.resolve_item(self.parent_id()); + if let ItemKind::Type(ref ty) = *parent.kind() { + if let TypeKind::Comp(ref ci) = *ty.kind() { + // All the constructors have the same name, so no need to + // resolve and check. + return ci + .constructors() + .iter() + .position(|c| *c == self.id()) + .or_else(|| { + ci.methods() + .iter() + .filter(|m| { + let item = ctx.resolve_item(m.signature()); + let func = item.expect_function(); + func.name() == func_name + }) + .position(|m| m.signature() == self.id()) + }); + } + } + + None + }) + } + + /// Get this item's base name (aka non-namespaced name). + fn base_name(&self, ctx: &BindgenContext) -> String { + if let Some(path) = self.annotations().use_instead_of() { + return path.last().unwrap().clone(); + } + + match *self.kind() { + ItemKind::Var(ref var) => var.name().to_owned(), + ItemKind::Module(ref module) => { + module.name().map(ToOwned::to_owned).unwrap_or_else(|| { + format!("_bindgen_mod_{}", self.exposed_id(ctx)) + }) + } + ItemKind::Type(ref ty) => { + ty.sanitized_name(ctx).map(Into::into).unwrap_or_else(|| { + format!("_bindgen_ty_{}", self.exposed_id(ctx)) + }) + } + ItemKind::Function(ref fun) => { + let mut name = fun.name().to_owned(); + + if let Some(idx) = self.overload_index(ctx) { + if idx > 0 { + write!(&mut name, "{}", idx).unwrap(); + } + } + + name + } + } + } + + fn is_anon(&self) -> bool { + match self.kind() { + ItemKind::Module(module) => module.name().is_none(), + ItemKind::Type(ty) => ty.name().is_none(), + ItemKind::Function(_) => false, + ItemKind::Var(_) => false, + } + } + + /// Get the canonical name without taking into account the replaces + /// annotation. + /// + /// This is the base logic used to implement hiding and replacing via + /// annotations, and also to implement proper name mangling. 
+ /// + /// The idea is that each generated type in the same "level" (read: module + /// or namespace) has a unique canonical name. + /// + /// This name should be derived from the immutable state contained in the + /// type and the parent chain, since it should be consistent. + /// + /// If `BindgenOptions::disable_nested_struct_naming` is true then returned + /// name is the inner most non-anonymous name plus all the anonymous base names + /// that follows. + pub(crate) fn real_canonical_name( + &self, + ctx: &BindgenContext, + opt: &NameOptions, + ) -> String { + let target = ctx.resolve_item(self.name_target(ctx)); + + // Short-circuit if the target has an override, and just use that. + if let Some(path) = target.annotations.use_instead_of() { + if ctx.options().enable_cxx_namespaces { + return path.last().unwrap().clone(); + } + return path.join("_"); + } + + let base_name = target.base_name(ctx); + + // Named template type arguments are never namespaced, and never + // mangled. + if target.is_template_param(ctx, &()) { + return base_name; + } + + // Ancestors' ID iter + let mut ids_iter = target + .parent_id() + .ancestors(ctx) + .filter(|id| *id != ctx.root_module()) + .take_while(|id| { + // Stop iterating ancestors once we reach a non-inline namespace + // when opt.within_namespaces is set. + !opt.within_namespaces || !ctx.resolve_item(*id).is_module() + }) + .filter(|id| { + if !ctx.options().conservative_inline_namespaces { + if let ItemKind::Module(ref module) = + *ctx.resolve_item(*id).kind() + { + return !module.is_inline(); + } + } + + true + }); + + let ids: Vec<_> = if ctx.options().disable_nested_struct_naming { + let mut ids = Vec::new(); + + // If target is anonymous we need find its first named ancestor. + if target.is_anon() { + for id in ids_iter.by_ref() { + ids.push(id); + + if !ctx.resolve_item(id).is_anon() { + break; + } + } + } + + ids + } else { + ids_iter.collect() + }; + + // Concatenate this item's ancestors' names together. 
+ let mut names: Vec<_> = ids + .into_iter() + .map(|id| { + let item = ctx.resolve_item(id); + let target = ctx.resolve_item(item.name_target(ctx)); + target.base_name(ctx) + }) + .filter(|name| !name.is_empty()) + .collect(); + + names.reverse(); + + if !base_name.is_empty() { + names.push(base_name); + } + + if ctx.options().c_naming { + if let Some(prefix) = self.c_naming_prefix() { + names.insert(0, prefix.to_string()); + } + } + + let name = names.join("_"); + + let name = if opt.user_mangled == UserMangled::Yes { + ctx.options() + .last_callback(|callbacks| callbacks.item_name(&name)) + .unwrap_or(name) + } else { + name + }; + + ctx.rust_mangle(&name).into_owned() + } + + /// The exposed ID that represents an unique ID among the siblings of a + /// given item. + pub(crate) fn exposed_id(&self, ctx: &BindgenContext) -> String { + // Only use local ids for enums, classes, structs and union types. All + // other items use their global ID. + let ty_kind = self.kind().as_type().map(|t| t.kind()); + if let Some(ty_kind) = ty_kind { + match *ty_kind { + TypeKind::Comp(..) | + TypeKind::TemplateInstantiation(..) | + TypeKind::Enum(..) => return self.local_id(ctx).to_string(), + _ => {} + } + } + + // Note that this `id_` prefix prevents (really unlikely) collisions + // between the global ID and the local ID of an item with the same + // parent. + format!("id_{}", self.id().as_usize()) + } + + /// Get a reference to this item's `Module`, or `None` if this is not a + /// `Module` item. + pub(crate) fn as_module(&self) -> Option<&Module> { + match self.kind { + ItemKind::Module(ref module) => Some(module), + _ => None, + } + } + + /// Get a mutable reference to this item's `Module`, or `None` if this is + /// not a `Module` item. 
+ pub(crate) fn as_module_mut(&mut self) -> Option<&mut Module> { + match self.kind { + ItemKind::Module(ref mut module) => Some(module), + _ => None, + } + } + + /// Returns whether the item is a constified module enum + fn is_constified_enum_module(&self, ctx: &BindgenContext) -> bool { + // Do not jump through aliases, except for aliases that point to a type + // with the same name, since we dont generate coe for them. + let item = self.id.into_resolver().through_type_refs().resolve(ctx); + let type_ = match *item.kind() { + ItemKind::Type(ref type_) => type_, + _ => return false, + }; + + match *type_.kind() { + TypeKind::Enum(ref enum_) => { + enum_.computed_enum_variation(ctx, self) == + EnumVariation::ModuleConsts + } + TypeKind::Alias(inner_id) => { + // TODO(emilio): Make this "hop through type aliases that aren't + // really generated" an option in `ItemResolver`? + let inner_item = ctx.resolve_item(inner_id); + let name = item.canonical_name(ctx); + + if inner_item.canonical_name(ctx) == name { + inner_item.is_constified_enum_module(ctx) + } else { + false + } + } + _ => false, + } + } + + /// Is this item of a kind that is enabled for code generation? + pub(crate) fn is_enabled_for_codegen(&self, ctx: &BindgenContext) -> bool { + let cc = &ctx.options().codegen_config; + match *self.kind() { + ItemKind::Module(..) => true, + ItemKind::Var(_) => cc.vars(), + ItemKind::Type(_) => cc.types(), + ItemKind::Function(ref f) => match f.kind() { + FunctionKind::Function => cc.functions(), + FunctionKind::Method(MethodKind::Constructor) => { + cc.constructors() + } + FunctionKind::Method(MethodKind::Destructor) | + FunctionKind::Method(MethodKind::VirtualDestructor { + .. + }) => cc.destructors(), + FunctionKind::Method(MethodKind::Static) | + FunctionKind::Method(MethodKind::Normal) | + FunctionKind::Method(MethodKind::Virtual { .. 
}) => { + cc.methods() + } + }, + } + } + + /// Returns the path we should use for allowlisting / blocklisting, which + /// doesn't include user-mangling. + pub(crate) fn path_for_allowlisting( + &self, + ctx: &BindgenContext, + ) -> &Vec { + self.path_for_allowlisting + .borrow_with(|| self.compute_path(ctx, UserMangled::No)) + } + + fn compute_path( + &self, + ctx: &BindgenContext, + mangled: UserMangled, + ) -> Vec { + if let Some(path) = self.annotations().use_instead_of() { + let mut ret = + vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()]; + ret.extend_from_slice(path); + return ret; + } + + let target = ctx.resolve_item(self.name_target(ctx)); + let mut path: Vec<_> = target + .ancestors(ctx) + .chain(iter::once(ctx.root_module().into())) + .map(|id| ctx.resolve_item(id)) + .filter(|item| { + item.id() == target.id() || + item.as_module().map_or(false, |module| { + !module.is_inline() || + ctx.options().conservative_inline_namespaces + }) + }) + .map(|item| { + ctx.resolve_item(item.name_target(ctx)) + .name(ctx) + .within_namespaces() + .user_mangled(mangled) + .get() + }) + .collect(); + path.reverse(); + path + } + + /// Returns a prefix for the canonical name when C naming is enabled. + fn c_naming_prefix(&self) -> Option<&str> { + let ty = match self.kind { + ItemKind::Type(ref ty) => ty, + _ => return None, + }; + + Some(match ty.kind() { + TypeKind::Comp(ref ci) => match ci.kind() { + CompKind::Struct => "struct", + CompKind::Union => "union", + }, + TypeKind::Enum(..) => "enum", + _ => return None, + }) + } + + /// Whether this is a `#[must_use]` type. 
+ pub(crate) fn must_use(&self, ctx: &BindgenContext) -> bool { + self.annotations().must_use_type() || ctx.must_use_type_by_name(self) + } +} + +impl IsOpaque for T +where + T: Copy + Into, +{ + type Extra = (); + + fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item((*self).into()).is_opaque(ctx, &()) + } +} + +impl IsOpaque for Item { + type Extra = (); + + fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.annotations.opaque() || + self.as_type().map_or(false, |ty| ty.is_opaque(ctx, self)) || + ctx.opaque_by_name(self.path_for_allowlisting(ctx)) + } +} + +impl HasVtable for T +where + T: Copy + Into, +{ + fn has_vtable(&self, ctx: &BindgenContext) -> bool { + let id: ItemId = (*self).into(); + id.as_type_id(ctx).map_or(false, |id| { + !matches!(ctx.lookup_has_vtable(id), HasVtableResult::No) + }) + } + + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { + let id: ItemId = (*self).into(); + id.as_type_id(ctx).map_or(false, |id| { + matches!(ctx.lookup_has_vtable(id), HasVtableResult::SelfHasVtable) + }) + } +} + +impl HasVtable for Item { + fn has_vtable(&self, ctx: &BindgenContext) -> bool { + self.id().has_vtable(ctx) + } + + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { + self.id().has_vtable_ptr(ctx) + } +} + +impl Sizedness for T +where + T: Copy + Into, +{ + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { + let id: ItemId = (*self).into(); + id.as_type_id(ctx) + .map_or(SizednessResult::default(), |id| ctx.lookup_sizedness(id)) + } +} + +impl Sizedness for Item { + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { + self.id().sizedness(ctx) + } +} + +impl HasTypeParamInArray for T +where + T: Copy + Into, +{ + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + 
ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_type_param_in_array(*self) + } +} + +impl HasTypeParamInArray for Item { + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_type_param_in_array(self.id()) + } +} + +impl HasFloat for T +where + T: Copy + Into, +{ + fn has_float(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_float(*self) + } +} + +impl HasFloat for Item { + fn has_float(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_float(self.id()) + } +} + +/// A set of items. +pub(crate) type ItemSet = BTreeSet; + +impl DotAttributes for Item { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{:?} + name{}", + self.id, + self.name(ctx).get() + )?; + + if self.is_opaque(ctx, &()) { + writeln!(out, "opaquetrue")?; + } + + self.kind.dot_attributes(ctx, out) + } +} + +impl TemplateParameters for T +where + T: Copy + Into, +{ + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + ctx.resolve_item_fallible(*self) + .map_or(vec![], |item| item.self_template_params(ctx)) + } +} + +impl TemplateParameters for Item { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + self.kind.self_template_params(ctx) + } +} + +impl TemplateParameters for ItemKind { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + match *self { + ItemKind::Type(ref ty) => ty.self_template_params(ctx), + // If we start emitting bindings to explicitly instantiated + // functions, then we'll need to check ItemKind::Function for + // template params. 
+ ItemKind::Function(_) | ItemKind::Module(_) | ItemKind::Var(_) => { + vec![] + } + } + } +} + +// An utility function to handle recursing inside nested types. +fn visit_child( + cur: clang::Cursor, + id: ItemId, + ty: &clang::Type, + parent_id: Option, + ctx: &mut BindgenContext, + result: &mut Result, +) -> clang_sys::CXChildVisitResult { + use clang_sys::*; + if result.is_ok() { + return CXChildVisit_Break; + } + + *result = Item::from_ty_with_id(id, ty, cur, parent_id, ctx); + + match *result { + Ok(..) => CXChildVisit_Break, + Err(ParseError::Recurse) => { + cur.visit(|c| visit_child(c, id, ty, parent_id, ctx, result)); + CXChildVisit_Continue + } + Err(ParseError::Continue) => CXChildVisit_Continue, + } +} + +impl Item { + /// Create a builtin type. + pub(crate) fn builtin_type( + kind: TypeKind, + is_const: bool, + ctx: &mut BindgenContext, + ) -> TypeId { + // Feel free to add more here, I'm just lazy. + match kind { + TypeKind::Void | + TypeKind::Int(..) | + TypeKind::Pointer(..) | + TypeKind::Float(..) => {} + _ => panic!("Unsupported builtin type"), + } + + let ty = Type::new(None, None, kind, is_const); + let id = ctx.next_item_id(); + let module = ctx.root_module().into(); + ctx.add_item( + Item::new(id, None, None, module, ItemKind::Type(ty), None), + None, + None, + ); + id.as_type_id_unchecked() + } + + /// Parse this item from the given Clang cursor. + pub(crate) fn parse( + cursor: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + use crate::ir::var::Var; + use clang_sys::*; + + if !cursor.is_valid() { + return Err(ParseError::Continue); + } + + let comment = cursor.raw_comment(); + let annotations = Annotations::new(&cursor); + + let current_module = ctx.current_module().into(); + let relevant_parent_id = parent_id.unwrap_or(current_module); + + #[allow(clippy::missing_docs_in_private_items)] + macro_rules! 
try_parse { + ($what:ident) => { + match $what::parse(cursor, ctx) { + Ok(ParseResult::New(item, declaration)) => { + let id = ctx.next_item_id(); + + ctx.add_item( + Item::new( + id, + comment, + annotations, + relevant_parent_id, + ItemKind::$what(item), + Some(cursor.location()), + ), + declaration, + Some(cursor), + ); + return Ok(id); + } + Ok(ParseResult::AlreadyResolved(id)) => { + return Ok(id); + } + Err(ParseError::Recurse) => return Err(ParseError::Recurse), + Err(ParseError::Continue) => {} + } + }; + } + + try_parse!(Module); + + // NOTE: Is extremely important to parse functions and vars **before** + // types. Otherwise we can parse a function declaration as a type + // (which is legal), and lose functions to generate. + // + // In general, I'm not totally confident this split between + // ItemKind::Function and TypeKind::FunctionSig is totally worth it, but + // I guess we can try. + try_parse!(Function); + try_parse!(Var); + + // Types are sort of special, so to avoid parsing template classes + // twice, handle them separately. + { + let definition = cursor.definition(); + let applicable_cursor = definition.unwrap_or(cursor); + + let relevant_parent_id = match definition { + Some(definition) => { + if definition != cursor { + ctx.add_semantic_parent(definition, relevant_parent_id); + return Ok(Item::from_ty_or_ref( + applicable_cursor.cur_type(), + cursor, + parent_id, + ctx, + ) + .into()); + } + ctx.known_semantic_parent(definition) + .or(parent_id) + .unwrap_or_else(|| ctx.current_module().into()) + } + None => relevant_parent_id, + }; + + match Item::from_ty( + &applicable_cursor.cur_type(), + applicable_cursor, + Some(relevant_parent_id), + ctx, + ) { + Ok(ty) => return Ok(ty.into()), + Err(ParseError::Recurse) => return Err(ParseError::Recurse), + Err(ParseError::Continue) => {} + } + } + + // Guess how does clang treat extern "C" blocks? 
+ if cursor.kind() == CXCursor_UnexposedDecl { + Err(ParseError::Recurse) + } else { + // We allowlist cursors here known to be unhandled, to prevent being + // too noisy about this. + match cursor.kind() { + CXCursor_MacroDefinition | + CXCursor_MacroExpansion | + CXCursor_UsingDeclaration | + CXCursor_UsingDirective | + CXCursor_StaticAssert | + CXCursor_FunctionTemplate => { + debug!( + "Unhandled cursor kind {:?}: {:?}", + cursor.kind(), + cursor + ); + } + CXCursor_InclusionDirective => { + let file = cursor.get_included_file_name(); + match file { + None => { + warn!( + "Inclusion of a nameless file in {:?}", + cursor + ); + } + Some(filename) => { + ctx.include_file(filename); + } + } + } + _ => { + // ignore toplevel operator overloads + let spelling = cursor.spelling(); + if !spelling.starts_with("operator") { + warn!( + "Unhandled cursor kind {:?}: {:?}", + cursor.kind(), + cursor + ); + } + } + } + + Err(ParseError::Continue) + } + } + + /// Parse this item from the given Clang type, or if we haven't resolved all + /// the other items this one depends on, an unresolved reference. + pub(crate) fn from_ty_or_ref( + ty: clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> TypeId { + let id = ctx.next_item_id(); + Self::from_ty_or_ref_with_id(id, ty, location, parent_id, ctx) + } + + /// Parse a C++ type. If we find a reference to a type that has not been + /// defined yet, use `UnresolvedTypeRef` as a placeholder. + /// + /// This logic is needed to avoid parsing items with the incorrect parent + /// and it's sort of complex to explain, so I'll just point to + /// `tests/headers/typeref.hpp` to see the kind of constructs that forced + /// this. + /// + /// Typerefs are resolved once parsing is completely done, see + /// `BindgenContext::resolve_typerefs`. 
+ pub(crate) fn from_ty_or_ref_with_id( + potential_id: ItemId, + ty: clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> TypeId { + debug!( + "from_ty_or_ref_with_id: {:?} {:?}, {:?}, {:?}", + potential_id, ty, location, parent_id + ); + + if ctx.collected_typerefs() { + debug!("refs already collected, resolving directly"); + return Item::from_ty_with_id( + potential_id, + &ty, + location, + parent_id, + ctx, + ) + .unwrap_or_else(|_| Item::new_opaque_type(potential_id, &ty, ctx)); + } + + if let Some(ty) = ctx.builtin_or_resolved_ty( + potential_id, + parent_id, + &ty, + Some(location), + ) { + debug!("{:?} already resolved: {:?}", ty, location); + return ty; + } + + debug!("New unresolved type reference: {:?}, {:?}", ty, location); + + let is_const = ty.is_const(); + let kind = TypeKind::UnresolvedTypeRef(ty, location, parent_id); + let current_module = ctx.current_module(); + + ctx.add_item( + Item::new( + potential_id, + None, + None, + parent_id.unwrap_or_else(|| current_module.into()), + ItemKind::Type(Type::new(None, None, kind, is_const)), + Some(location.location()), + ), + None, + None, + ); + potential_id.as_type_id_unchecked() + } + + /// Parse this item from the given Clang type. See [`Item::from_ty_with_id`]. + pub(crate) fn from_ty( + ty: &clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + let id = ctx.next_item_id(); + Item::from_ty_with_id(id, ty, location, parent_id, ctx) + } + + /// This is one of the trickiest methods you'll find (probably along with + /// some of the ones that handle templates in `BindgenContext`). + /// + /// This method parses a type, given the potential ID of that type (if + /// parsing it was correct), an optional location we're scanning, which is + /// critical some times to obtain information, an optional parent item ID, + /// that will, if it's `None`, become the current module ID, and the + /// context. 
+ pub(crate) fn from_ty_with_id( + id: ItemId, + ty: &clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + + debug!( + "Item::from_ty_with_id: {:?}\n\ + \tty = {:?},\n\ + \tlocation = {:?}", + id, ty, location + ); + + if ty.kind() == clang_sys::CXType_Unexposed || + location.cur_type().kind() == clang_sys::CXType_Unexposed + { + if ty.is_associated_type() || + location.cur_type().is_associated_type() + { + return Ok(Item::new_opaque_type(id, ty, ctx)); + } + + if let Some(param_id) = Item::type_param(None, location, ctx) { + return Ok(ctx.build_ty_wrapper(id, param_id, None, ty)); + } + } + + // Treat all types that are declared inside functions as opaque. The Rust binding + // won't be able to do anything with them anyway. + // + // (If we don't do this check here, we can have subtle logic bugs because we generally + // ignore function bodies. See issue #2036.) + if let Some(ref parent) = ty.declaration().fallible_semantic_parent() { + if FunctionKind::from_cursor(parent).is_some() { + debug!("Skipping type declared inside function: {:?}", ty); + return Ok(Item::new_opaque_type(id, ty, ctx)); + } + } + + let decl = { + let canonical_def = ty.canonical_type().declaration().definition(); + canonical_def.unwrap_or_else(|| ty.declaration()) + }; + + let comment = location + .raw_comment() + .or_else(|| decl.raw_comment()) + .or_else(|| location.raw_comment()); + + let annotations = + Annotations::new(&decl).or_else(|| Annotations::new(&location)); + + if let Some(ref annotations) = annotations { + if let Some(replaced) = annotations.use_instead_of() { + ctx.replace(replaced, id); + } + } + + if let Some(ty) = + ctx.builtin_or_resolved_ty(id, parent_id, ty, Some(location)) + { + return Ok(ty); + } + + // First, check we're not recursing. 
+ let mut valid_decl = decl.kind() != CXCursor_NoDeclFound; + let declaration_to_look_for = if valid_decl { + decl.canonical() + } else if location.kind() == CXCursor_ClassTemplate { + valid_decl = true; + location + } else { + decl + }; + + if valid_decl { + if let Some(partial) = ctx + .currently_parsed_types() + .iter() + .find(|ty| *ty.decl() == declaration_to_look_for) + { + debug!("Avoiding recursion parsing type: {:?}", ty); + // Unchecked because we haven't finished this type yet. + return Ok(partial.id().as_type_id_unchecked()); + } + } + + let current_module = ctx.current_module().into(); + let partial_ty = PartialType::new(declaration_to_look_for, id); + if valid_decl { + ctx.begin_parsing(partial_ty); + } + + let result = Type::from_clang_ty(id, ty, location, parent_id, ctx); + let relevant_parent_id = parent_id.unwrap_or(current_module); + let ret = match result { + Ok(ParseResult::AlreadyResolved(ty)) => { + Ok(ty.as_type_id_unchecked()) + } + Ok(ParseResult::New(item, declaration)) => { + ctx.add_item( + Item::new( + id, + comment, + annotations, + relevant_parent_id, + ItemKind::Type(item), + Some(location.location()), + ), + declaration, + Some(location), + ); + Ok(id.as_type_id_unchecked()) + } + Err(ParseError::Continue) => Err(ParseError::Continue), + Err(ParseError::Recurse) => { + debug!("Item::from_ty recursing in the ast"); + let mut result = Err(ParseError::Recurse); + + // Need to pop here, otherwise we'll get stuck. + // + // TODO: Find a nicer interface, really. Also, the + // declaration_to_look_for suspiciously shares a lot of + // logic with ir::context, so we should refactor that. 
+ if valid_decl { + let finished = ctx.finish_parsing(); + assert_eq!(*finished.decl(), declaration_to_look_for); + } + + location.visit(|cur| { + visit_child(cur, id, ty, parent_id, ctx, &mut result) + }); + + if valid_decl { + let partial_ty = + PartialType::new(declaration_to_look_for, id); + ctx.begin_parsing(partial_ty); + } + + // If we have recursed into the AST all we know, and we still + // haven't found what we've got, let's just try and make a named + // type. + // + // This is what happens with some template members, for example. + if let Err(ParseError::Recurse) = result { + warn!( + "Unknown type, assuming named template type: \ + id = {:?}; spelling = {}", + id, + ty.spelling() + ); + Item::type_param(Some(id), location, ctx) + .map(Ok) + .unwrap_or(Err(ParseError::Recurse)) + } else { + result + } + } + }; + + if valid_decl { + let partial_ty = ctx.finish_parsing(); + assert_eq!(*partial_ty.decl(), declaration_to_look_for); + } + + ret + } + + /// A named type is a template parameter, e.g., the `T` in `Foo`. They're always local so + /// it's the only exception when there's no declaration for a type. + pub(crate) fn type_param( + with_id: Option, + location: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Option { + let ty = location.cur_type(); + + debug!( + "Item::type_param:\n\ + \twith_id = {:?},\n\ + \tty = {} {:?},\n\ + \tlocation: {:?}", + with_id, + ty.spelling(), + ty, + location + ); + + if ty.kind() != clang_sys::CXType_Unexposed { + // If the given cursor's type's kind is not Unexposed, then we + // aren't looking at a template parameter. This check may need to be + // updated in the future if they start properly exposing template + // type parameters. + return None; + } + + let ty_spelling = ty.spelling(); + + // Clang does not expose any information about template type parameters + // via their clang::Type, nor does it give us their canonical cursors + // the straightforward way. 
However, there are three situations from + // which we can find the definition of the template type parameter, if + // the cursor is indeed looking at some kind of a template type + // parameter or use of one: + // + // 1. The cursor is pointing at the template type parameter's + // definition. This is the trivial case. + // + // (kind = TemplateTypeParameter, ...) + // + // 2. The cursor is pointing at a TypeRef whose referenced() cursor is + // situation (1). + // + // (kind = TypeRef, + // referenced = (kind = TemplateTypeParameter, ...), + // ...) + // + // 3. The cursor is pointing at some use of a template type parameter + // (for example, in a FieldDecl), and this cursor has a child cursor + // whose spelling is the same as the parent's type's spelling, and whose + // kind is a TypeRef of the situation (2) variety. + // + // (kind = FieldDecl, + // type = (kind = Unexposed, + // spelling = "T", + // ...), + // children = + // (kind = TypeRef, + // spelling = "T", + // referenced = (kind = TemplateTypeParameter, + // spelling = "T", + // ...), + // ...) + // ...) + // + // TODO: The alternative to this hacky pattern matching would be to + // maintain proper scopes of template parameters while parsing and use + // de Brujin indices to access template parameters, which clang exposes + // in the cursor's type's canonical type's spelling: + // "type-parameter-x-y". That is probably a better approach long-term, + // but maintaining these scopes properly would require more changes to + // the whole libclang -> IR parsing code. + + fn is_template_with_spelling( + refd: &clang::Cursor, + spelling: &str, + ) -> bool { + lazy_static! { + static ref ANON_TYPE_PARAM_RE: regex::Regex = + regex::Regex::new(r"^type\-parameter\-\d+\-\d+$").unwrap(); + } + + if refd.kind() != clang_sys::CXCursor_TemplateTypeParameter { + return false; + } + + let refd_spelling = refd.spelling(); + refd_spelling == spelling || + // Allow for anonymous template parameters. 
+ (refd_spelling.is_empty() && ANON_TYPE_PARAM_RE.is_match(spelling.as_ref())) + } + + let definition = if is_template_with_spelling(&location, &ty_spelling) { + // Situation (1) + location + } else if location.kind() == clang_sys::CXCursor_TypeRef { + // Situation (2) + match location.referenced() { + Some(refd) + if is_template_with_spelling(&refd, &ty_spelling) => + { + refd + } + _ => return None, + } + } else { + // Situation (3) + let mut definition = None; + + location.visit(|child| { + let child_ty = child.cur_type(); + if child_ty.kind() == clang_sys::CXCursor_TypeRef && + child_ty.spelling() == ty_spelling + { + match child.referenced() { + Some(refd) + if is_template_with_spelling( + &refd, + &ty_spelling, + ) => + { + definition = Some(refd); + return clang_sys::CXChildVisit_Break; + } + _ => {} + } + } + + clang_sys::CXChildVisit_Continue + }); + + definition? + }; + assert!(is_template_with_spelling(&definition, &ty_spelling)); + + // Named types are always parented to the root module. They are never + // referenced with namespace prefixes, and they can't inherit anything + // from their parent either, so it is simplest to just hang them off + // something we know will always exist. + let parent = ctx.root_module().into(); + + if let Some(id) = ctx.get_type_param(&definition) { + if let Some(with_id) = with_id { + return Some(ctx.build_ty_wrapper( + with_id, + id, + Some(parent), + &ty, + )); + } else { + return Some(id); + } + } + + // See tests/headers/const_tparam.hpp and + // tests/headers/variadic_tname.hpp. 
+ let name = ty_spelling.replace("const ", "").replace('.', ""); + + let id = with_id.unwrap_or_else(|| ctx.next_item_id()); + let item = Item::new( + id, + None, + None, + parent, + ItemKind::Type(Type::named(name)), + Some(location.location()), + ); + ctx.add_type_param(item, definition); + Some(id.as_type_id_unchecked()) + } +} + +impl ItemCanonicalName for Item { + fn canonical_name(&self, ctx: &BindgenContext) -> String { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.canonical_name + .borrow_with(|| { + let in_namespace = ctx.options().enable_cxx_namespaces || + ctx.options().disable_name_namespacing; + + if in_namespace { + self.name(ctx).within_namespaces().get() + } else { + self.name(ctx).get() + } + }) + .clone() + } +} + +impl ItemCanonicalPath for Item { + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec { + let mut path = self.canonical_path(ctx); + + // ASSUMPTION: (disable_name_namespacing && cxx_namespaces) + // is equivalent to + // disable_name_namespacing + if ctx.options().disable_name_namespacing { + // Only keep the last item in path + let split_idx = path.len() - 1; + path = path.split_off(split_idx); + } else if !ctx.options().enable_cxx_namespaces { + // Ignore first item "root" + path = vec![path[1..].join("_")]; + } + + if self.is_constified_enum_module(ctx) { + path.push(CONSTIFIED_ENUM_MODULE_REPR_NAME.into()); + } + + path + } + + fn canonical_path(&self, ctx: &BindgenContext) -> Vec { + self.compute_path(ctx, UserMangled::Yes) + } +} + +/// Whether to use the user-mangled name (mangled by the `item_name` callback or +/// not. +/// +/// Most of the callers probably want just yes, but the ones dealing with +/// allowlisting and blocklisting don't. +#[derive(Copy, Clone, Debug, PartialEq)] +enum UserMangled { + No, + Yes, +} + +/// Builder struct for naming variations, which hold inside different +/// flags for naming options. 
+#[derive(Debug)] +pub(crate) struct NameOptions<'a> { + item: &'a Item, + ctx: &'a BindgenContext, + within_namespaces: bool, + user_mangled: UserMangled, +} + +impl<'a> NameOptions<'a> { + /// Construct a new `NameOptions` + pub(crate) fn new(item: &'a Item, ctx: &'a BindgenContext) -> Self { + NameOptions { + item, + ctx, + within_namespaces: false, + user_mangled: UserMangled::Yes, + } + } + + /// Construct the name without the item's containing C++ namespaces mangled + /// into it. In other words, the item's name within the item's namespace. + pub(crate) fn within_namespaces(&mut self) -> &mut Self { + self.within_namespaces = true; + self + } + + fn user_mangled(&mut self, user_mangled: UserMangled) -> &mut Self { + self.user_mangled = user_mangled; + self + } + + /// Construct a name `String` + pub(crate) fn get(&self) -> String { + self.item.real_canonical_name(self.ctx, self) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/layout.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/layout.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/layout.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/layout.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,136 @@ +//! Intermediate representation for the physical layout of some type. + +use super::derive::CanDerive; +use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; +use crate::clang; +use crate::ir::context::BindgenContext; +use std::cmp; + +/// A type that represents the struct layout of a type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct Layout { + /// The size (in bytes) of this layout. + pub(crate) size: usize, + /// The alignment (in bytes) of this layout. + pub(crate) align: usize, + /// Whether this layout's members are packed or not. 
+ pub(crate) packed: bool, +} + +#[test] +fn test_layout_for_size() { + use std::mem; + + let ptr_size = mem::size_of::<*mut ()>(); + assert_eq!( + Layout::for_size_internal(ptr_size, ptr_size), + Layout::new(ptr_size, ptr_size) + ); + assert_eq!( + Layout::for_size_internal(ptr_size, 3 * ptr_size), + Layout::new(3 * ptr_size, ptr_size) + ); +} + +impl Layout { + /// Gets the integer type name for a given known size. + pub(crate) fn known_type_for_size( + ctx: &BindgenContext, + size: usize, + ) -> Option<&'static str> { + Some(match size { + 16 if ctx.options().rust_features.i128_and_u128 => "u128", + 8 => "u64", + 4 => "u32", + 2 => "u16", + 1 => "u8", + _ => return None, + }) + } + + /// Construct a new `Layout` with the given `size` and `align`. It is not + /// packed. + pub(crate) fn new(size: usize, align: usize) -> Self { + Layout { + size, + align, + packed: false, + } + } + + fn for_size_internal(ptr_size: usize, size: usize) -> Self { + let mut next_align = 2; + while size % next_align == 0 && next_align <= ptr_size { + next_align *= 2; + } + Layout { + size, + align: next_align / 2, + packed: false, + } + } + + /// Creates a non-packed layout for a given size, trying to use the maximum + /// alignment possible. + pub(crate) fn for_size(ctx: &BindgenContext, size: usize) -> Self { + Self::for_size_internal(ctx.target_pointer_size(), size) + } + + /// Get this layout as an opaque type. + pub(crate) fn opaque(&self) -> Opaque { + Opaque(*self) + } +} + +/// When we are treating a type as opaque, it is just a blob with a `Layout`. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct Opaque(pub(crate) Layout); + +impl Opaque { + /// Construct a new opaque type from the given clang type. 
+ pub(crate) fn from_clang_ty( + ty: &clang::Type, + ctx: &BindgenContext, + ) -> Type { + let layout = Layout::new(ty.size(ctx), ty.align(ctx)); + let ty_kind = TypeKind::Opaque; + let is_const = ty.is_const(); + Type::new(None, Some(layout), ty_kind, is_const) + } + + /// Return the known rust type we should use to create a correctly-aligned + /// field with this layout. + pub(crate) fn known_rust_type_for_array( + &self, + ctx: &BindgenContext, + ) -> Option<&'static str> { + Layout::known_type_for_size(ctx, self.0.align) + } + + /// Return the array size that an opaque type for this layout should have if + /// we know the correct type for it, or `None` otherwise. + pub(crate) fn array_size(&self, ctx: &BindgenContext) -> Option { + if self.known_rust_type_for_array(ctx).is_some() { + Some(self.0.size / cmp::max(self.0.align, 1)) + } else { + None + } + } + + /// Return `true` if this opaque layout's array size will fit within the + /// maximum number of array elements that Rust allows deriving traits + /// with. Return `false` otherwise. + pub(crate) fn array_size_within_derive_limit( + &self, + ctx: &BindgenContext, + ) -> CanDerive { + if self + .array_size(ctx) + .map_or(false, |size| size <= RUST_DERIVE_IN_ARRAY_LIMIT) + { + CanDerive::Yes + } else { + CanDerive::Manually + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/mod.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,25 @@ +//! The ir module defines bindgen's intermediate representation. +//! +//! Parsing C/C++ generates the IR, while code generation outputs Rust code from +//! the IR. 
+#![deny(clippy::missing_docs_in_private_items)] + +pub(crate) mod analysis; +pub(crate) mod annotations; +pub(crate) mod comment; +pub(crate) mod comp; +pub(crate) mod context; +pub(crate) mod derive; +pub(crate) mod dot; +pub(crate) mod enum_ty; +pub(crate) mod function; +pub(crate) mod int; +pub(crate) mod item; +pub(crate) mod item_kind; +pub(crate) mod layout; +pub(crate) mod module; +pub(crate) mod objc; +pub(crate) mod template; +pub(crate) mod traversal; +pub(crate) mod ty; +pub(crate) mod var; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/module.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/module.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/module.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/module.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,95 @@ +//! Intermediate representation for modules (AKA C++ namespaces). + +use super::context::BindgenContext; +use super::dot::DotAttributes; +use super::item::ItemSet; +use crate::clang; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; +use crate::parse_one; +use std::io; + +/// Whether this module is inline or not. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ModuleKind { + /// This module is not inline. + Normal, + /// This module is inline, as in `inline namespace foo {}`. + Inline, +} + +/// A module, as in, a C++ namespace. +#[derive(Clone, Debug)] +pub(crate) struct Module { + /// The name of the module, or none if it's anonymous. + name: Option, + /// The kind of module this is. + kind: ModuleKind, + /// The children of this module, just here for convenience. + children: ItemSet, +} + +impl Module { + /// Construct a new `Module`. + pub(crate) fn new(name: Option, kind: ModuleKind) -> Self { + Module { + name, + kind, + children: ItemSet::new(), + } + } + + /// Get this module's name. 
+ pub(crate) fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + /// Get a mutable reference to this module's children. + pub(crate) fn children_mut(&mut self) -> &mut ItemSet { + &mut self.children + } + + /// Get this module's children. + pub(crate) fn children(&self) -> &ItemSet { + &self.children + } + + /// Whether this namespace is inline. + pub(crate) fn is_inline(&self) -> bool { + self.kind == ModuleKind::Inline + } +} + +impl DotAttributes for Module { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "ModuleKind{:?}", self.kind) + } +} + +impl ClangSubItemParser for Module { + fn parse( + cursor: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + match cursor.kind() { + CXCursor_Namespace => { + let module_id = ctx.module(cursor); + ctx.with_module(module_id, |ctx| { + cursor.visit(|cursor| { + parse_one(ctx, cursor, Some(module_id.into())) + }) + }); + + Ok(ParseResult::AlreadyResolved(module_id.into())) + } + _ => Err(ParseError::Continue), + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/objc.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/objc.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/objc.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/objc.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,335 @@ +//! 
Objective C types + +use super::context::{BindgenContext, ItemId}; +use super::function::FunctionSig; +use super::item::Item; +use super::traversal::{Trace, Tracer}; +use super::ty::TypeKind; +use crate::clang; +use clang_sys::CXChildVisit_Continue; +use clang_sys::CXCursor_ObjCCategoryDecl; +use clang_sys::CXCursor_ObjCClassMethodDecl; +use clang_sys::CXCursor_ObjCClassRef; +use clang_sys::CXCursor_ObjCInstanceMethodDecl; +use clang_sys::CXCursor_ObjCProtocolDecl; +use clang_sys::CXCursor_ObjCProtocolRef; +use clang_sys::CXCursor_ObjCSuperClassRef; +use clang_sys::CXCursor_TemplateTypeParameter; +use proc_macro2::{Ident, Span, TokenStream}; + +/// Objective C interface as used in TypeKind +/// +/// Also protocols and categories are parsed as this type +#[derive(Debug)] +pub(crate) struct ObjCInterface { + /// The name + /// like, NSObject + name: String, + + category: Option, + + is_protocol: bool, + + /// The list of template names almost always, ObjectType or KeyType + pub(crate) template_names: Vec, + + /// The list of protocols that this interface conforms to. + pub(crate) conforms_to: Vec, + + /// The direct parent for this interface. + pub(crate) parent_class: Option, + + /// List of the methods defined in this interfae + methods: Vec, + + class_methods: Vec, +} + +/// The objective c methods +#[derive(Debug)] +pub(crate) struct ObjCMethod { + /// The original method selector name + /// like, dataWithBytes:length: + name: String, + + /// Method name as converted to rust + /// like, dataWithBytes_length_ + rust_name: String, + + signature: FunctionSig, + + /// Is class method? 
+ is_class_method: bool, +} + +impl ObjCInterface { + fn new(name: &str) -> ObjCInterface { + ObjCInterface { + name: name.to_owned(), + category: None, + is_protocol: false, + template_names: Vec::new(), + parent_class: None, + conforms_to: Vec::new(), + methods: Vec::new(), + class_methods: Vec::new(), + } + } + + /// The name + /// like, NSObject + pub(crate) fn name(&self) -> &str { + self.name.as_ref() + } + + /// Formats the name for rust + /// Can be like NSObject, but with categories might be like NSObject_NSCoderMethods + /// and protocols are like PNSObject + pub(crate) fn rust_name(&self) -> String { + if let Some(ref cat) = self.category { + format!("{}_{}", self.name(), cat) + } else if self.is_protocol { + format!("P{}", self.name()) + } else { + format!("I{}", self.name().to_owned()) + } + } + + /// Is this a template interface? + pub(crate) fn is_template(&self) -> bool { + !self.template_names.is_empty() + } + + /// List of the methods defined in this interface + pub(crate) fn methods(&self) -> &Vec { + &self.methods + } + + /// Is this a protocol? + pub(crate) fn is_protocol(&self) -> bool { + self.is_protocol + } + + /// Is this a category? 
+ pub(crate) fn is_category(&self) -> bool { + self.category.is_some() + } + + /// List of the class methods defined in this interface + pub(crate) fn class_methods(&self) -> &Vec { + &self.class_methods + } + + /// Parses the Objective C interface from the cursor + pub(crate) fn from_ty( + cursor: &clang::Cursor, + ctx: &mut BindgenContext, + ) -> Option { + let name = cursor.spelling(); + let mut interface = Self::new(&name); + + if cursor.kind() == CXCursor_ObjCProtocolDecl { + interface.is_protocol = true; + } + + cursor.visit(|c| { + match c.kind() { + CXCursor_ObjCClassRef => { + if cursor.kind() == CXCursor_ObjCCategoryDecl { + // We are actually a category extension, and we found the reference + // to the original interface, so name this interface approriately + interface.name = c.spelling(); + interface.category = Some(cursor.spelling()); + } + } + CXCursor_ObjCProtocolRef => { + // Gather protocols this interface conforms to + let needle = format!("P{}", c.spelling()); + let items_map = ctx.items(); + debug!( + "Interface {} conforms to {}, find the item", + interface.name, needle + ); + + for (id, item) in items_map { + if let Some(ty) = item.as_type() { + if let TypeKind::ObjCInterface(ref protocol) = + *ty.kind() + { + if protocol.is_protocol { + debug!( + "Checking protocol {}, ty.name {:?}", + protocol.name, + ty.name() + ); + if Some(needle.as_ref()) == ty.name() { + debug!( + "Found conforming protocol {:?}", + item + ); + interface.conforms_to.push(id); + break; + } + } + } + } + } + } + CXCursor_ObjCInstanceMethodDecl | + CXCursor_ObjCClassMethodDecl => { + let name = c.spelling(); + let signature = + FunctionSig::from_ty(&c.cur_type(), &c, ctx) + .expect("Invalid function sig"); + let is_class_method = + c.kind() == CXCursor_ObjCClassMethodDecl; + let method = + ObjCMethod::new(&name, signature, is_class_method); + interface.add_method(method); + } + CXCursor_TemplateTypeParameter => { + let name = c.spelling(); + 
interface.template_names.push(name); + } + CXCursor_ObjCSuperClassRef => { + let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx); + interface.parent_class = Some(item.into()); + } + _ => {} + } + CXChildVisit_Continue + }); + Some(interface) + } + + fn add_method(&mut self, method: ObjCMethod) { + if method.is_class_method { + self.class_methods.push(method); + } else { + self.methods.push(method); + } + } +} + +impl ObjCMethod { + fn new( + name: &str, + signature: FunctionSig, + is_class_method: bool, + ) -> ObjCMethod { + let split_name: Vec<&str> = name.split(':').collect(); + + let rust_name = split_name.join("_"); + + ObjCMethod { + name: name.to_owned(), + rust_name, + signature, + is_class_method, + } + } + + /// Method name as converted to rust + /// like, dataWithBytes_length_ + pub(crate) fn rust_name(&self) -> &str { + self.rust_name.as_ref() + } + + /// Returns the methods signature as FunctionSig + pub(crate) fn signature(&self) -> &FunctionSig { + &self.signature + } + + /// Is this a class method? + pub(crate) fn is_class_method(&self) -> bool { + self.is_class_method + } + + /// Formats the method call + pub(crate) fn format_method_call( + &self, + args: &[TokenStream], + ) -> TokenStream { + let split_name: Vec> = self + .name + .split(':') + .map(|name| { + if name.is_empty() { + None + } else { + // Try to parse the current name as an identifier. This might fail if the + // name is a keyword so we try to prepend "r#" to it and parse again. If + // this also fails, we panic with the first error. + Some( + syn::parse_str::(name) + .or_else(|err| { + syn::parse_str::(&format!("r#{}", name)) + .map_err(|_| err) + }) + .expect("Invalid identifier"), + ) + } + }) + .collect(); + + // No arguments + if args.is_empty() && split_name.len() == 1 { + let name = &split_name[0]; + return quote! 
{ + #name + }; + } + + // Check right amount of arguments + assert!( + args.len() == split_name.len() - 1, + "Incorrect method name or arguments for objc method, {:?} vs {:?}", + args, + split_name + ); + + // Get arguments without type signatures to pass to `msg_send!` + let mut args_without_types = vec![]; + for arg in args.iter() { + let arg = arg.to_string(); + let name_and_sig: Vec<&str> = arg.split(' ').collect(); + let name = name_and_sig[0]; + args_without_types.push(Ident::new(name, Span::call_site())) + } + + let args = split_name.into_iter().zip(args_without_types).map( + |(arg, arg_val)| { + if let Some(arg) = arg { + quote! { #arg: #arg_val } + } else { + quote! { #arg_val: #arg_val } + } + }, + ); + + quote! { + #( #args )* + } + } +} + +impl Trace for ObjCInterface { + type Extra = (); + + fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + for method in &self.methods { + method.signature.trace(context, tracer, &()); + } + + for class_method in &self.class_methods { + class_method.signature.trace(context, tracer, &()); + } + + for protocol in &self.conforms_to { + tracer.visit(*protocol); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/template.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/template.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/template.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/template.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,342 @@ +//! Template declaration and instantiation related things. +//! +//! The nomenclature surrounding templates is often confusing, so here are a few +//! brief definitions: +//! +//! * "Template definition": a class/struct/alias/function definition that takes +//! generic template parameters. For example: +//! +//! ```c++ +//! template +//! class List { +//! // ... +//! }; +//! ``` +//! +//! 
* "Template instantiation": an instantiation is a use of a template with +//! concrete template arguments. For example, `List`. +//! +//! * "Template specialization": an alternative template definition providing a +//! custom definition for instantiations with the matching template +//! arguments. This C++ feature is unsupported by bindgen. For example: +//! +//! ```c++ +//! template<> +//! class List { +//! // Special layout for int lists... +//! }; +//! ``` + +use super::context::{BindgenContext, ItemId, TypeId}; +use super::item::{IsOpaque, Item, ItemAncestors}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use crate::clang; + +/// Template declaration (and such declaration's template parameters) related +/// methods. +/// +/// This trait's methods distinguish between `None` and `Some([])` for +/// declarations that are not templates and template declarations with zero +/// parameters, in general. +/// +/// Consider this example: +/// +/// ```c++ +/// template +/// class Foo { +/// T use_of_t; +/// U use_of_u; +/// +/// template +/// using Bar = V*; +/// +/// class Inner { +/// T x; +/// U y; +/// Bar z; +/// }; +/// +/// template +/// class Lol { +/// // No use of W, but here's a use of T. +/// T t; +/// }; +/// +/// template +/// class Wtf { +/// // X is not used because W is not used. +/// Lol lololol; +/// }; +/// }; +/// +/// class Qux { +/// int y; +/// }; +/// ``` +/// +/// The following table depicts the results of each trait method when invoked on +/// each of the declarations above: +/// +/// +------+----------------------+--------------------------+-------------------------+---- +/// |Decl. | self_template_params | num_self_template_params | all_template_parameters | ... +/// +------+----------------------+--------------------------+-------------------------+---- +/// |Foo | T, U | 2 | T, U | ... +/// |Bar | V | 1 | T, U, V | ... +/// |Inner | | 0 | T, U | ... +/// |Lol | W | 1 | T, U, W | ... +/// |Wtf | X | 1 | T, U, X | ... 
+/// |Qux | | 0 | | ... +/// +------+----------------------+--------------------------+------------------------+---- +/// +/// ----+------+-----+----------------------+ +/// ... |Decl. | ... | used_template_params | +/// ----+------+-----+----------------------+ +/// ... |Foo | ... | T, U | +/// ... |Bar | ... | V | +/// ... |Inner | ... | | +/// ... |Lol | ... | T | +/// ... |Wtf | ... | T | +/// ... |Qux | ... | | +/// ----+------+-----+----------------------+ +pub(crate) trait TemplateParameters: Sized { + /// Get the set of `ItemId`s that make up this template declaration's free + /// template parameters. + /// + /// Note that these might *not* all be named types: C++ allows + /// constant-value template parameters as well as template-template + /// parameters. Of course, Rust does not allow generic parameters to be + /// anything but types, so we must treat them as opaque, and avoid + /// instantiating them. + fn self_template_params(&self, ctx: &BindgenContext) -> Vec; + + /// Get the number of free template parameters this template declaration + /// has. + fn num_self_template_params(&self, ctx: &BindgenContext) -> usize { + self.self_template_params(ctx).len() + } + + /// Get the complete set of template parameters that can affect this + /// declaration. + /// + /// Note that this item doesn't need to be a template declaration itself for + /// `Some` to be returned here (in contrast to `self_template_params`). If + /// this item is a member of a template declaration, then the parent's + /// template parameters are included here. + /// + /// In the example above, `Inner` depends on both of the `T` and `U` type + /// parameters, even though it is not itself a template declaration and + /// therefore has no type parameters itself. Perhaps it helps to think about + /// how we would fully reference such a member type in C++: + /// `Foo::Inner`. `Foo` *must* be instantiated with template + /// arguments before we can gain access to the `Inner` member type. 
+ fn all_template_params(&self, ctx: &BindgenContext) -> Vec + where + Self: ItemAncestors, + { + let mut ancestors: Vec<_> = self.ancestors(ctx).collect(); + ancestors.reverse(); + ancestors + .into_iter() + .flat_map(|id| id.self_template_params(ctx).into_iter()) + .collect() + } + + /// Get only the set of template parameters that this item uses. This is a + /// subset of `all_template_params` and does not necessarily contain any of + /// `self_template_params`. + fn used_template_params(&self, ctx: &BindgenContext) -> Vec + where + Self: AsRef, + { + assert!( + ctx.in_codegen_phase(), + "template parameter usage is not computed until codegen" + ); + + let id = *self.as_ref(); + ctx.resolve_item(id) + .all_template_params(ctx) + .into_iter() + .filter(|p| ctx.uses_template_parameter(id, *p)) + .collect() + } +} + +/// A trait for things which may or may not be a named template type parameter. +pub(crate) trait AsTemplateParam { + /// Any extra information the implementor might need to make this decision. + type Extra; + + /// Convert this thing to the item ID of a named template type parameter. + fn as_template_param( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> Option; + + /// Is this a named template type parameter? + fn is_template_param( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> bool { + self.as_template_param(ctx, extra).is_some() + } +} + +/// A concrete instantiation of a generic template. +#[derive(Clone, Debug)] +pub(crate) struct TemplateInstantiation { + /// The template definition which this is instantiating. + definition: TypeId, + /// The concrete template arguments, which will be substituted in the + /// definition for the generic template parameters. + args: Vec, +} + +impl TemplateInstantiation { + /// Construct a new template instantiation from the given parts. 
+ pub(crate) fn new(definition: TypeId, args: I) -> TemplateInstantiation + where + I: IntoIterator, + { + TemplateInstantiation { + definition, + args: args.into_iter().collect(), + } + } + + /// Get the template definition for this instantiation. + pub(crate) fn template_definition(&self) -> TypeId { + self.definition + } + + /// Get the concrete template arguments used in this instantiation. + pub(crate) fn template_arguments(&self) -> &[TypeId] { + &self.args[..] + } + + /// Parse a `TemplateInstantiation` from a clang `Type`. + pub(crate) fn from_ty( + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> Option { + use clang_sys::*; + + let template_args = ty.template_args().map_or(vec![], |args| match ty + .canonical_type() + .template_args() + { + Some(canonical_args) => { + let arg_count = args.len(); + args.chain(canonical_args.skip(arg_count)) + .filter(|t| t.kind() != CXType_Invalid) + .map(|t| { + Item::from_ty_or_ref(t, t.declaration(), None, ctx) + }) + .collect() + } + None => args + .filter(|t| t.kind() != CXType_Invalid) + .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx)) + .collect(), + }); + + let declaration = ty.declaration(); + let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl + { + Some(declaration) + } else { + declaration.specialized().or_else(|| { + let mut template_ref = None; + ty.declaration().visit(|child| { + if child.kind() == CXCursor_TemplateRef { + template_ref = Some(child); + return CXVisit_Break; + } + + // Instantiations of template aliases might have the + // TemplateRef to the template alias definition arbitrarily + // deep, so we need to recurse here and not only visit + // direct children. 
+ CXChildVisit_Recurse + }); + + template_ref.and_then(|cur| cur.referenced()) + }) + }; + + let definition = match definition { + Some(def) => def, + None => { + if !ty.declaration().is_builtin() { + warn!( + "Could not find template definition for template \ + instantiation" + ); + } + return None; + } + }; + + let template_definition = + Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx); + + Some(TemplateInstantiation::new( + template_definition, + template_args, + )) + } +} + +impl IsOpaque for TemplateInstantiation { + type Extra = Item; + + /// Is this an opaque template instantiation? + fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { + if self.template_definition().is_opaque(ctx, &()) { + return true; + } + + // TODO(#774): This doesn't properly handle opaque instantiations where + // an argument is itself an instantiation because `canonical_name` does + // not insert the template arguments into the name, ie it for nested + // template arguments it creates "Foo" instead of "Foo". The fully + // correct fix is to make `canonical_{name,path}` include template + // arguments properly. 
+ + let mut path = item.path_for_allowlisting(ctx).clone(); + let args: Vec<_> = self + .template_arguments() + .iter() + .map(|arg| { + let arg_path = + ctx.resolve_item(*arg).path_for_allowlisting(ctx); + arg_path[1..].join("::") + }) + .collect(); + { + let last = path.last_mut().unwrap(); + last.push('<'); + last.push_str(&args.join(", ")); + last.push('>'); + } + + ctx.opaque_by_name(&path) + } +} + +impl Trace for TemplateInstantiation { + type Extra = (); + + fn trace(&self, _ctx: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + tracer + .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration); + for arg in self.template_arguments() { + tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/traversal.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/traversal.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/traversal.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/traversal.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,479 @@ +//! Traversal of the graph of IR items and types. + +use super::context::{BindgenContext, ItemId}; +use super::item::ItemSet; +use std::collections::{BTreeMap, VecDeque}; + +/// An outgoing edge in the IR graph is a reference from some item to another +/// item: +/// +/// from --> to +/// +/// The `from` is left implicit: it is the concrete `Trace` implementer which +/// yielded this outgoing edge. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct Edge { + to: ItemId, + kind: EdgeKind, +} + +impl Edge { + /// Construct a new edge whose referent is `to` and is of the given `kind`. + pub(crate) fn new(to: ItemId, kind: EdgeKind) -> Edge { + Edge { to, kind } + } +} + +impl From for ItemId { + fn from(val: Edge) -> Self { + val.to + } +} + +/// The kind of edge reference. 
This is useful when we wish to only consider +/// certain kinds of edges for a particular traversal or analysis. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) enum EdgeKind { + /// A generic, catch-all edge. + Generic, + + /// An edge from a template declaration, to the definition of a named type + /// parameter. For example, the edge from `Foo` to `T` in the following + /// snippet: + /// + /// ```C++ + /// template + /// class Foo { }; + /// ``` + TemplateParameterDefinition, + + /// An edge from a template instantiation to the template declaration that + /// is being instantiated. For example, the edge from `Foo` to + /// to `Foo`: + /// + /// ```C++ + /// template + /// class Foo { }; + /// + /// using Bar = Foo; + /// ``` + TemplateDeclaration, + + /// An edge from a template instantiation to its template argument. For + /// example, `Foo` to `Bar`: + /// + /// ```C++ + /// template + /// class Foo { }; + /// + /// class Bar { }; + /// + /// using FooBar = Foo; + /// ``` + TemplateArgument, + + /// An edge from a compound type to one of its base member types. For + /// example, the edge from `Bar` to `Foo`: + /// + /// ```C++ + /// class Foo { }; + /// + /// class Bar : public Foo { }; + /// ``` + BaseMember, + + /// An edge from a compound type to the types of one of its fields. For + /// example, the edge from `Foo` to `int`: + /// + /// ```C++ + /// class Foo { + /// int x; + /// }; + /// ``` + Field, + + /// An edge from an class or struct type to an inner type member. For + /// example, the edge from `Foo` to `Foo::Bar` here: + /// + /// ```C++ + /// class Foo { + /// struct Bar { }; + /// }; + /// ``` + InnerType, + + /// An edge from an class or struct type to an inner static variable. For + /// example, the edge from `Foo` to `Foo::BAR` here: + /// + /// ```C++ + /// class Foo { + /// static const char* BAR; + /// }; + /// ``` + InnerVar, + + /// An edge from a class or struct type to one of its method functions. 
For + /// example, the edge from `Foo` to `Foo::bar`: + /// + /// ```C++ + /// class Foo { + /// bool bar(int x, int y); + /// }; + /// ``` + Method, + + /// An edge from a class or struct type to one of its constructor + /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`: + /// + /// ```C++ + /// class Foo { + /// int my_x; + /// int my_y; + /// + /// public: + /// Foo(int x, int y); + /// }; + /// ``` + Constructor, + + /// An edge from a class or struct type to its destructor function. For + /// example, the edge from `Doggo` to `Doggo::~Doggo()`: + /// + /// ```C++ + /// struct Doggo { + /// char* wow; + /// + /// public: + /// ~Doggo(); + /// }; + /// ``` + Destructor, + + /// An edge from a function declaration to its return type. For example, the + /// edge from `foo` to `int`: + /// + /// ```C++ + /// int foo(char* string); + /// ``` + FunctionReturn, + + /// An edge from a function declaration to one of its parameter types. For + /// example, the edge from `foo` to `char*`: + /// + /// ```C++ + /// int foo(char* string); + /// ``` + FunctionParameter, + + /// An edge from a static variable to its type. For example, the edge from + /// `FOO` to `const char*`: + /// + /// ```C++ + /// static const char* FOO; + /// ``` + VarType, + + /// An edge from a non-templated alias or typedef to the referenced type. + TypeReference, +} + +/// A predicate to allow visiting only sub-sets of the whole IR graph by +/// excluding certain edges from being followed by the traversal. +/// +/// The predicate must return true if the traversal should follow this edge +/// and visit everything that is reachable through it. +pub(crate) type TraversalPredicate = + for<'a> fn(&'a BindgenContext, Edge) -> bool; + +/// A `TraversalPredicate` implementation that follows all edges, and therefore +/// traversals using this predicate will see the whole IR graph reachable from +/// the traversal's roots. 
+pub(crate) fn all_edges(_: &BindgenContext, _: Edge) -> bool { + true +} + +/// A `TraversalPredicate` implementation that only follows +/// `EdgeKind::InnerType` edges, and therefore traversals using this predicate +/// will only visit the traversal's roots and their inner types. This is used +/// in no-recursive-allowlist mode, where inner types such as anonymous +/// structs/unions still need to be processed. +pub(crate) fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool { + edge.kind == EdgeKind::InnerType +} + +/// A `TraversalPredicate` implementation that only follows edges to items that +/// are enabled for code generation. This lets us skip considering items for +/// which are not reachable from code generation. +pub(crate) fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool { + let cc = &ctx.options().codegen_config; + match edge.kind { + EdgeKind::Generic => { + ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx) + } + + // We statically know the kind of item that non-generic edges can point + // to, so we don't need to actually resolve the item and check + // `Item::is_enabled_for_codegen`. + EdgeKind::TemplateParameterDefinition | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::InnerType | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::VarType | + EdgeKind::TypeReference => cc.types(), + EdgeKind::InnerVar => cc.vars(), + EdgeKind::Method => cc.methods(), + EdgeKind::Constructor => cc.constructors(), + EdgeKind::Destructor => cc.destructors(), + } +} + +/// The storage for the set of items that have been seen (although their +/// outgoing edges might not have been fully traversed yet) in an active +/// traversal. +pub(crate) trait TraversalStorage<'ctx> { + /// Construct a new instance of this TraversalStorage, for a new traversal. + fn new(ctx: &'ctx BindgenContext) -> Self; + + /// Add the given item to the storage. 
If the item has never been seen + /// before, return `true`. Otherwise, return `false`. + /// + /// The `from` item is the item from which we discovered this item, or is + /// `None` if this item is a root. + fn add(&mut self, from: Option, item: ItemId) -> bool; +} + +impl<'ctx> TraversalStorage<'ctx> for ItemSet { + fn new(_: &'ctx BindgenContext) -> Self { + ItemSet::new() + } + + fn add(&mut self, _: Option, item: ItemId) -> bool { + self.insert(item) + } +} + +/// A `TraversalStorage` implementation that keeps track of how we first reached +/// each item. This is useful for providing debug assertions with meaningful +/// diagnostic messages about dangling items. +#[derive(Debug)] +pub(crate) struct Paths<'ctx>(BTreeMap, &'ctx BindgenContext); + +impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> { + fn new(ctx: &'ctx BindgenContext) -> Self { + Paths(BTreeMap::new(), ctx) + } + + fn add(&mut self, from: Option, item: ItemId) -> bool { + let newly_discovered = + self.0.insert(item, from.unwrap_or(item)).is_none(); + + if self.1.resolve_item_fallible(item).is_none() { + let mut path = vec![]; + let mut current = item; + loop { + let predecessor = *self.0.get(¤t).expect( + "We know we found this item id, so it must have a \ + predecessor", + ); + if predecessor == current { + break; + } + path.push(predecessor); + current = predecessor; + } + path.reverse(); + panic!( + "Found reference to dangling id = {:?}\nvia path = {:?}", + item, path + ); + } + + newly_discovered + } +} + +/// The queue of seen-but-not-yet-traversed items. +/// +/// Using a FIFO queue with a traversal will yield a breadth-first traversal, +/// while using a LIFO queue will result in a depth-first traversal of the IR +/// graph. +pub(crate) trait TraversalQueue: Default { + /// Add a newly discovered item to the queue. + fn push(&mut self, item: ItemId); + + /// Pop the next item to traverse, if any. 
+ fn next(&mut self) -> Option; +} + +impl TraversalQueue for Vec { + fn push(&mut self, item: ItemId) { + self.push(item); + } + + fn next(&mut self) -> Option { + self.pop() + } +} + +impl TraversalQueue for VecDeque { + fn push(&mut self, item: ItemId) { + self.push_back(item); + } + + fn next(&mut self) -> Option { + self.pop_front() + } +} + +/// Something that can receive edges from a `Trace` implementation. +pub(crate) trait Tracer { + /// Note an edge between items. Called from within a `Trace` implementation. + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind); + + /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`. + fn visit(&mut self, item: ItemId) { + self.visit_kind(item, EdgeKind::Generic); + } +} + +impl Tracer for F +where + F: FnMut(ItemId, EdgeKind), +{ + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { + (*self)(item, kind) + } +} + +/// Trace all of the outgoing edges to other items. Implementations should call +/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)` +/// for each of their outgoing edges. +pub(crate) trait Trace { + /// If a particular type needs extra information beyond what it has in + /// `self` and `context` to find its referenced items, its implementation + /// can define this associated type, forcing callers to pass the needed + /// information through. + type Extra; + + /// Trace all of this item's outgoing edges to other items. + fn trace( + &self, + context: &BindgenContext, + tracer: &mut T, + extra: &Self::Extra, + ) where + T: Tracer; +} + +/// An graph traversal of the transitive closure of references between items. +/// +/// See `BindgenContext::allowlisted_items` for more information. +pub(crate) struct ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + ctx: &'ctx BindgenContext, + + /// The set of items we have seen thus far in this traversal. 
+ seen: Storage, + + /// The set of items that we have seen, but have yet to traverse. + queue: Queue, + + /// The predicate that determines which edges this traversal will follow. + predicate: TraversalPredicate, + + /// The item we are currently traversing. + currently_traversing: Option, +} + +impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + /// Begin a new traversal, starting from the given roots. + pub(crate) fn new( + ctx: &'ctx BindgenContext, + roots: R, + predicate: TraversalPredicate, + ) -> ItemTraversal<'ctx, Storage, Queue> + where + R: IntoIterator, + { + let mut seen = Storage::new(ctx); + let mut queue = Queue::default(); + + for id in roots { + seen.add(None, id); + queue.push(id); + } + + ItemTraversal { + ctx, + seen, + queue, + predicate, + currently_traversing: None, + } + } +} + +impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { + let edge = Edge::new(item, kind); + if !(self.predicate)(self.ctx, edge) { + return; + } + + let is_newly_discovered = + self.seen.add(self.currently_traversing, item); + if is_newly_discovered { + self.queue.push(item) + } + } +} + +impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + type Item = ItemId; + + fn next(&mut self) -> Option { + let id = self.queue.next()?; + + let newly_discovered = self.seen.add(None, id); + debug_assert!( + !newly_discovered, + "should have already seen anything we get out of our queue" + ); + debug_assert!( + self.ctx.resolve_item_fallible(id).is_some(), + "should only get IDs of actual items in our context during traversal" + ); + + self.currently_traversing = Some(id); + id.trace(self.ctx, self, &()); + self.currently_traversing = None; + + Some(id) + } 
+} + +/// An iterator to find any dangling items. +/// +/// See `BindgenContext::assert_no_dangling_item_traversal` for more +/// information. +pub(crate) type AssertNoDanglingItemsTraversal<'ctx> = + ItemTraversal<'ctx, Paths<'ctx>, VecDeque>; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/ty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/ty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/ty.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/ty.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,1273 @@ +//! Everything related to types in our intermediate representation. + +use super::comp::CompInfo; +use super::context::{BindgenContext, ItemId, TypeId}; +use super::dot::DotAttributes; +use super::enum_ty::Enum; +use super::function::FunctionSig; +use super::int::IntKind; +use super::item::{IsOpaque, Item}; +use super::layout::{Layout, Opaque}; +use super::objc::ObjCInterface; +use super::template::{ + AsTemplateParam, TemplateInstantiation, TemplateParameters, +}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use crate::clang::{self, Cursor}; +use crate::parse::{ParseError, ParseResult}; +use std::borrow::Cow; +use std::io; + +/// The base representation of a type in bindgen. +/// +/// A type has an optional name, which if present cannot be empty, a `layout` +/// (size, alignment and packedness) if known, a `Kind`, which determines which +/// kind of type it is, and whether the type is const. +#[derive(Debug)] +pub(crate) struct Type { + /// The name of the type, or None if it was an unnamed struct or union. + name: Option, + /// The layout of the type, if known. + layout: Option, + /// The inner kind of the type + kind: TypeKind, + /// Whether this type is const-qualified. 
+ is_const: bool, +} + +/// The maximum number of items in an array for which Rust implements common +/// traits, and so if we have a type containing an array with more than this +/// many items, we won't be able to derive common traits on that type. +/// +pub(crate) const RUST_DERIVE_IN_ARRAY_LIMIT: usize = 32; + +impl Type { + /// Get the underlying `CompInfo` for this type as a mutable reference, or + /// `None` if this is some other kind of type. + pub(crate) fn as_comp_mut(&mut self) -> Option<&mut CompInfo> { + match self.kind { + TypeKind::Comp(ref mut ci) => Some(ci), + _ => None, + } + } + + /// Construct a new `Type`. + pub(crate) fn new( + name: Option, + layout: Option, + kind: TypeKind, + is_const: bool, + ) -> Self { + Type { + name, + layout, + kind, + is_const, + } + } + + /// Which kind of type is this? + pub(crate) fn kind(&self) -> &TypeKind { + &self.kind + } + + /// Get a mutable reference to this type's kind. + pub(crate) fn kind_mut(&mut self) -> &mut TypeKind { + &mut self.kind + } + + /// Get this type's name. + pub(crate) fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + /// Whether this is a block pointer type. + pub(crate) fn is_block_pointer(&self) -> bool { + matches!(self.kind, TypeKind::BlockPointer(..)) + } + + /// Is this an integer type, including `bool` or `char`? + pub(crate) fn is_int(&self) -> bool { + matches!(self.kind, TypeKind::Int(_)) + } + + /// Is this a compound type? + pub(crate) fn is_comp(&self) -> bool { + matches!(self.kind, TypeKind::Comp(..)) + } + + /// Is this a union? + pub(crate) fn is_union(&self) -> bool { + match self.kind { + TypeKind::Comp(ref comp) => comp.is_union(), + _ => false, + } + } + + /// Is this type of kind `TypeKind::TypeParam`? + pub(crate) fn is_type_param(&self) -> bool { + matches!(self.kind, TypeKind::TypeParam) + } + + /// Is this a template instantiation type? 
+ pub(crate) fn is_template_instantiation(&self) -> bool { + matches!(self.kind, TypeKind::TemplateInstantiation(..)) + } + + /// Is this a function type? + pub(crate) fn is_function(&self) -> bool { + matches!(self.kind, TypeKind::Function(..)) + } + + /// Is this an enum type? + pub(crate) fn is_enum(&self) -> bool { + matches!(self.kind, TypeKind::Enum(..)) + } + + /// Is this either a builtin or named type? + pub(crate) fn is_builtin_or_type_param(&self) -> bool { + matches!( + self.kind, + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Function(..) | + TypeKind::Array(..) | + TypeKind::Reference(..) | + TypeKind::Pointer(..) | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::TypeParam + ) + } + + /// Creates a new named type, with name `name`. + pub(crate) fn named(name: String) -> Self { + let name = if name.is_empty() { None } else { Some(name) }; + Self::new(name, None, TypeKind::TypeParam, false) + } + + /// Is this a floating point type? + pub(crate) fn is_float(&self) -> bool { + matches!(self.kind, TypeKind::Float(..)) + } + + /// Is this a boolean type? + pub(crate) fn is_bool(&self) -> bool { + matches!(self.kind, TypeKind::Int(IntKind::Bool)) + } + + /// Is this an integer type? + pub(crate) fn is_integer(&self) -> bool { + matches!(self.kind, TypeKind::Int(..)) + } + + /// Cast this type to an integer kind, or `None` if it is not an integer + /// type. + pub(crate) fn as_integer(&self) -> Option { + match self.kind { + TypeKind::Int(int_kind) => Some(int_kind), + _ => None, + } + } + + /// Is this a `const` qualified type? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } + + /// Is this an unresolved reference? + pub(crate) fn is_unresolved_ref(&self) -> bool { + matches!(self.kind, TypeKind::UnresolvedTypeRef(_, _, _)) + } + + /// Is this a incomplete array type? 
+ pub(crate) fn is_incomplete_array( + &self, + ctx: &BindgenContext, + ) -> Option { + match self.kind { + TypeKind::Array(item, len) => { + if len == 0 { + Some(item.into()) + } else { + None + } + } + TypeKind::ResolvedTypeRef(inner) => { + ctx.resolve_type(inner).is_incomplete_array(ctx) + } + _ => None, + } + } + + /// What is the layout of this type? + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + self.layout.or_else(|| { + match self.kind { + TypeKind::Comp(ref ci) => ci.layout(ctx), + TypeKind::Array(inner, length) if length == 0 => Some( + Layout::new(0, ctx.resolve_type(inner).layout(ctx)?.align), + ), + // FIXME(emilio): This is a hack for anonymous union templates. + // Use the actual pointer size! + TypeKind::Pointer(..) => Some(Layout::new( + ctx.target_pointer_size(), + ctx.target_pointer_size(), + )), + TypeKind::ResolvedTypeRef(inner) => { + ctx.resolve_type(inner).layout(ctx) + } + _ => None, + } + }) + } + + /// Whether this named type is an invalid C++ identifier. This is done to + /// avoid generating invalid code with some cases we can't handle, see: + /// + /// tests/headers/381-decltype-alias.hpp + pub(crate) fn is_invalid_type_param(&self) -> bool { + match self.kind { + TypeKind::TypeParam => { + let name = self.name().expect("Unnamed named type?"); + !clang::is_valid_identifier(name) + } + _ => false, + } + } + + /// Takes `name`, and returns a suitable identifier representation for it. + fn sanitize_name(name: &str) -> Cow { + if clang::is_valid_identifier(name) { + return Cow::Borrowed(name); + } + + let name = name.replace(|c| c == ' ' || c == ':' || c == '.', "_"); + Cow::Owned(name) + } + + /// Get this type's santizied name. 
+ pub(crate) fn sanitized_name<'a>( + &'a self, + ctx: &BindgenContext, + ) -> Option> { + let name_info = match *self.kind() { + TypeKind::Pointer(inner) => Some((inner, Cow::Borrowed("ptr"))), + TypeKind::Reference(inner) => Some((inner, Cow::Borrowed("ref"))), + TypeKind::Array(inner, length) => { + Some((inner, format!("array{}", length).into())) + } + _ => None, + }; + if let Some((inner, prefix)) = name_info { + ctx.resolve_item(inner) + .expect_type() + .sanitized_name(ctx) + .map(|name| format!("{}_{}", prefix, name).into()) + } else { + self.name().map(Self::sanitize_name) + } + } + + /// See safe_canonical_type. + pub(crate) fn canonical_type<'tr>( + &'tr self, + ctx: &'tr BindgenContext, + ) -> &'tr Type { + self.safe_canonical_type(ctx) + .expect("Should have been resolved after parsing!") + } + + /// Returns the canonical type of this type, that is, the "inner type". + /// + /// For example, for a `typedef`, the canonical type would be the + /// `typedef`ed type, for a template instantiation, would be the template + /// its specializing, and so on. Return None if the type is unresolved. + pub(crate) fn safe_canonical_type<'tr>( + &'tr self, + ctx: &'tr BindgenContext, + ) -> Option<&'tr Type> { + match self.kind { + TypeKind::TypeParam | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Comp(..) | + TypeKind::Opaque | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Pointer(..) | + TypeKind::BlockPointer(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::ObjCInterface(..) 
=> Some(self), + + TypeKind::ResolvedTypeRef(inner) | + TypeKind::Alias(inner) | + TypeKind::TemplateAlias(inner, _) => { + ctx.resolve_type(inner).safe_canonical_type(ctx) + } + TypeKind::TemplateInstantiation(ref inst) => ctx + .resolve_type(inst.template_definition()) + .safe_canonical_type(ctx), + + TypeKind::UnresolvedTypeRef(..) => None, + } + } + + /// There are some types we don't want to stop at when finding an opaque + /// item, so we can arrive to the proper item that needs to be generated. + pub(crate) fn should_be_traced_unconditionally(&self) -> bool { + matches!( + self.kind, + TypeKind::Comp(..) | + TypeKind::Function(..) | + TypeKind::Pointer(..) | + TypeKind::Array(..) | + TypeKind::Reference(..) | + TypeKind::TemplateInstantiation(..) | + TypeKind::ResolvedTypeRef(..) + ) + } +} + +impl IsOpaque for Type { + type Extra = Item; + + fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { + match self.kind { + TypeKind::Opaque => true, + TypeKind::TemplateInstantiation(ref inst) => { + inst.is_opaque(ctx, item) + } + TypeKind::Comp(ref comp) => comp.is_opaque(ctx, &self.layout), + TypeKind::ResolvedTypeRef(to) => to.is_opaque(ctx, &()), + _ => false, + } + } +} + +impl AsTemplateParam for Type { + type Extra = Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + self.kind.as_template_param(ctx, item) + } +} + +impl AsTemplateParam for TypeKind { + type Extra = Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + match *self { + TypeKind::TypeParam => Some(item.id().expect_type_id(ctx)), + TypeKind::ResolvedTypeRef(id) => id.as_template_param(ctx, &()), + _ => None, + } + } +} + +impl DotAttributes for Type { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if let Some(ref layout) = self.layout { + writeln!( + out, + "size{} + align{}", + layout.size, layout.align + )?; + if 
layout.packed { + writeln!(out, "packedtrue")?; + } + } + + if self.is_const { + writeln!(out, "consttrue")?; + } + + self.kind.dot_attributes(ctx, out) + } +} + +impl DotAttributes for TypeKind { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "type kind{}", + self.kind_name() + )?; + + if let TypeKind::Comp(ref comp) = *self { + comp.dot_attributes(ctx, out)?; + } + + Ok(()) + } +} + +impl TypeKind { + fn kind_name(&self) -> &'static str { + match *self { + TypeKind::Void => "Void", + TypeKind::NullPtr => "NullPtr", + TypeKind::Comp(..) => "Comp", + TypeKind::Opaque => "Opaque", + TypeKind::Int(..) => "Int", + TypeKind::Float(..) => "Float", + TypeKind::Complex(..) => "Complex", + TypeKind::Alias(..) => "Alias", + TypeKind::TemplateAlias(..) => "TemplateAlias", + TypeKind::Array(..) => "Array", + TypeKind::Vector(..) => "Vector", + TypeKind::Function(..) => "Function", + TypeKind::Enum(..) => "Enum", + TypeKind::Pointer(..) => "Pointer", + TypeKind::BlockPointer(..) => "BlockPointer", + TypeKind::Reference(..) => "Reference", + TypeKind::TemplateInstantiation(..) => "TemplateInstantiation", + TypeKind::UnresolvedTypeRef(..) => "UnresolvedTypeRef", + TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef", + TypeKind::TypeParam => "TypeParam", + TypeKind::ObjCInterface(..) 
=> "ObjCInterface", + TypeKind::ObjCId => "ObjCId", + TypeKind::ObjCSel => "ObjCSel", + } + } +} + +#[test] +fn is_invalid_type_param_valid() { + let ty = Type::new(Some("foo".into()), None, TypeKind::TypeParam, false); + assert!(!ty.is_invalid_type_param()) +} + +#[test] +fn is_invalid_type_param_valid_underscore_and_numbers() { + let ty = Type::new( + Some("_foo123456789_".into()), + None, + TypeKind::TypeParam, + false, + ); + assert!(!ty.is_invalid_type_param()) +} + +#[test] +fn is_invalid_type_param_valid_unnamed_kind() { + let ty = Type::new(Some("foo".into()), None, TypeKind::Void, false); + assert!(!ty.is_invalid_type_param()) +} + +#[test] +fn is_invalid_type_param_invalid_start() { + let ty = Type::new(Some("1foo".into()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()) +} + +#[test] +fn is_invalid_type_param_invalid_remaing() { + let ty = Type::new(Some("foo-".into()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()) +} + +#[test] +#[should_panic] +fn is_invalid_type_param_unnamed() { + let ty = Type::new(None, None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()) +} + +#[test] +fn is_invalid_type_param_empty_name() { + let ty = Type::new(Some("".into()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()) +} + +impl TemplateParameters for Type { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + self.kind.self_template_params(ctx) + } +} + +impl TemplateParameters for TypeKind { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + match *self { + TypeKind::ResolvedTypeRef(id) => { + ctx.resolve_type(id).self_template_params(ctx) + } + TypeKind::Comp(ref comp) => comp.self_template_params(ctx), + TypeKind::TemplateAlias(_, ref args) => args.clone(), + + TypeKind::Opaque | + TypeKind::TemplateInstantiation(..) 
| + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(_) | + TypeKind::Float(_) | + TypeKind::Complex(_) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Function(_) | + TypeKind::Enum(_) | + TypeKind::Pointer(_) | + TypeKind::BlockPointer(_) | + TypeKind::Reference(_) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::TypeParam | + TypeKind::Alias(_) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::ObjCInterface(_) => vec![], + } + } +} + +/// The kind of float this type represents. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum FloatKind { + /// A `float`. + Float, + /// A `double`. + Double, + /// A `long double`. + LongDouble, + /// A `__float128`. + Float128, +} + +/// The different kinds of types that we can parse. +#[derive(Debug)] +pub(crate) enum TypeKind { + /// The void type. + Void, + + /// The `nullptr_t` type. + NullPtr, + + /// A compound type, that is, a class, struct, or union. + Comp(CompInfo), + + /// An opaque type that we just don't understand. All usage of this shoulf + /// result in an opaque blob of bytes generated from the containing type's + /// layout. + Opaque, + + /// An integer type, of a given kind. `bool` and `char` are also considered + /// integers. + Int(IntKind), + + /// A floating point type. + Float(FloatKind), + + /// A complex floating point type. + Complex(FloatKind), + + /// A type alias, with a name, that points to another type. + Alias(TypeId), + + /// A templated alias, pointing to an inner type, just as `Alias`, but with + /// template parameters. + TemplateAlias(TypeId, Vec), + + /// A packed vector type: element type, number of elements + Vector(TypeId, usize), + + /// An array of a type and a length. + Array(TypeId, usize), + + /// A function type, with a given signature. + Function(FunctionSig), + + /// An `enum` type. + Enum(Enum), + + /// A pointer to a type. The bool field represents whether it's const or + /// not. + Pointer(TypeId), + + /// A pointer to an Apple block. 
+ BlockPointer(TypeId), + + /// A reference to a type, as in: int& foo(). + Reference(TypeId), + + /// An instantiation of an abstract template definition with a set of + /// concrete template arguments. + TemplateInstantiation(TemplateInstantiation), + + /// A reference to a yet-to-resolve type. This stores the clang cursor + /// itself, and postpones its resolution. + /// + /// These are gone in a phase after parsing where these are mapped to + /// already known types, and are converted to ResolvedTypeRef. + /// + /// see tests/headers/typeref.hpp to see somewhere where this is a problem. + UnresolvedTypeRef( + clang::Type, + clang::Cursor, + /* parent_id */ + Option, + ), + + /// An indirection to another type. + /// + /// These are generated after we resolve a forward declaration, or when we + /// replace one type with another. + ResolvedTypeRef(TypeId), + + /// A named type, that is, a template parameter. + TypeParam, + + /// Objective C interface. Always referenced through a pointer + ObjCInterface(ObjCInterface), + + /// Objective C 'id' type, points to any object + ObjCId, + + /// Objective C selector type + ObjCSel, +} + +impl Type { + /// This is another of the nasty methods. This one is the one that takes + /// care of the core logic of converting a clang type to a `Type`. + /// + /// It's sort of nasty and full of special-casing, but hopefully the + /// comments in every special case justify why they're there. 
+ pub(crate) fn from_clang_ty( + potential_id: ItemId, + ty: &clang::Type, + location: Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + { + let already_resolved = ctx.builtin_or_resolved_ty( + potential_id, + parent_id, + ty, + Some(location), + ); + if let Some(ty) = already_resolved { + debug!("{:?} already resolved: {:?}", ty, location); + return Ok(ParseResult::AlreadyResolved(ty.into())); + } + } + + let layout = ty.fallible_layout(ctx).ok(); + let cursor = ty.declaration(); + let is_anonymous = cursor.is_anonymous(); + let mut name = if is_anonymous { + None + } else { + Some(cursor.spelling()).filter(|n| !n.is_empty()) + }; + + debug!( + "from_clang_ty: {:?}, ty: {:?}, loc: {:?}", + potential_id, ty, location + ); + debug!("currently_parsed_types: {:?}", ctx.currently_parsed_types()); + + let canonical_ty = ty.canonical_type(); + + // Parse objc protocols as if they were interfaces + let mut ty_kind = ty.kind(); + match location.kind() { + CXCursor_ObjCProtocolDecl | CXCursor_ObjCCategoryDecl => { + ty_kind = CXType_ObjCInterface + } + _ => {} + } + + // Objective C template type parameter + // FIXME: This is probably wrong, we are attempting to find the + // objc template params, which seem to manifest as a typedef. + // We are rewriting them as ID to suppress multiple conflicting + // typedefs at root level + if ty_kind == CXType_Typedef { + let is_template_type_param = + ty.declaration().kind() == CXCursor_TemplateTypeParameter; + let is_canonical_objcpointer = + canonical_ty.kind() == CXType_ObjCObjectPointer; + + // We have found a template type for objc interface + if is_canonical_objcpointer && is_template_type_param { + // Objective-C generics are just ids with fancy name. + // To keep it simple, just name them ids + name = Some("id".to_owned()); + } + } + + if location.kind() == CXCursor_ClassTemplatePartialSpecialization { + // Sorry! 
(Not sorry) + warn!( + "Found a partial template specialization; bindgen does not \ + support partial template specialization! Constructing \ + opaque type instead." + ); + return Ok(ParseResult::New( + Opaque::from_clang_ty(&canonical_ty, ctx), + None, + )); + } + + let kind = if location.kind() == CXCursor_TemplateRef || + (ty.template_args().is_some() && ty_kind != CXType_Typedef) + { + // This is a template instantiation. + match TemplateInstantiation::from_ty(ty, ctx) { + Some(inst) => TypeKind::TemplateInstantiation(inst), + None => TypeKind::Opaque, + } + } else { + match ty_kind { + CXType_Unexposed + if *ty != canonical_ty && + canonical_ty.kind() != CXType_Invalid && + ty.ret_type().is_none() && + // Sometime clang desugars some types more than + // what we need, specially with function + // pointers. + // + // We should also try the solution of inverting + // those checks instead of doing this, that is, + // something like: + // + // CXType_Unexposed if ty.ret_type().is_some() + // => { ... } + // + // etc. + !canonical_ty.spelling().contains("type-parameter") => + { + debug!("Looking for canonical type: {:?}", canonical_ty); + return Self::from_clang_ty( + potential_id, + &canonical_ty, + location, + parent_id, + ctx, + ); + } + CXType_Unexposed | CXType_Invalid => { + // For some reason Clang doesn't give us any hint in some + // situations where we should generate a function pointer (see + // tests/headers/func_ptr_in_struct.h), so we do a guess here + // trying to see if it has a valid return type. + if ty.ret_type().is_some() { + let signature = + FunctionSig::from_ty(ty, &location, ctx)?; + TypeKind::Function(signature) + // Same here, with template specialisations we can safely + // assume this is a Comp(..) 
+ } else if ty.is_fully_instantiated_template() { + debug!( + "Template specialization: {:?}, {:?} {:?}", + ty, location, canonical_ty + ); + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ) + .expect("C'mon"); + TypeKind::Comp(complex) + } else { + match location.kind() { + CXCursor_CXXBaseSpecifier | + CXCursor_ClassTemplate => { + if location.kind() == CXCursor_CXXBaseSpecifier + { + // In the case we're parsing a base specifier + // inside an unexposed or invalid type, it means + // that we're parsing one of two things: + // + // * A template parameter. + // * A complex class that isn't exposed. + // + // This means, unfortunately, that there's no + // good way to differentiate between them. + // + // Probably we could try to look at the + // declaration and complicate more this logic, + // but we'll keep it simple... if it's a valid + // C++ identifier, we'll consider it as a + // template parameter. + // + // This is because: + // + // * We expect every other base that is a + // proper identifier (that is, a simple + // struct/union declaration), to be exposed, + // so this path can't be reached in that + // case. + // + // * Quite conveniently, complex base + // specifiers preserve their full names (that + // is: Foo instead of Foo). We can take + // advantage of this. + // + // If we find some edge case where this doesn't + // work (which I guess is unlikely, see the + // different test cases[1][2][3][4]), we'd need + // to find more creative ways of differentiating + // these two cases. 
+ // + // [1]: inherit_named.hpp + // [2]: forward-inherit-struct-with-fields.hpp + // [3]: forward-inherit-struct.hpp + // [4]: inherit-namespaced.hpp + if location.spelling().chars().all(|c| { + c.is_alphanumeric() || c == '_' + }) { + return Err(ParseError::Recurse); + } + } else { + name = Some(location.spelling()); + } + + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ); + match complex { + Ok(complex) => TypeKind::Comp(complex), + Err(_) => { + warn!( + "Could not create complex type \ + from class template or base \ + specifier, using opaque blob" + ); + let opaque = + Opaque::from_clang_ty(ty, ctx); + return Ok(ParseResult::New( + opaque, None, + )); + } + } + } + CXCursor_TypeAliasTemplateDecl => { + debug!("TypeAliasTemplateDecl"); + + // We need to manually unwind this one. + let mut inner = Err(ParseError::Continue); + let mut args = vec![]; + + location.visit(|cur| { + match cur.kind() { + CXCursor_TypeAliasDecl => { + let current = cur.cur_type(); + + debug_assert_eq!( + current.kind(), + CXType_Typedef + ); + + name = Some(location.spelling()); + + let inner_ty = cur + .typedef_type() + .expect("Not valid Type?"); + inner = Ok(Item::from_ty_or_ref( + inner_ty, + cur, + Some(potential_id), + ctx, + )); + } + CXCursor_TemplateTypeParameter => { + let param = Item::type_param( + None, cur, ctx, + ) + .expect( + "Item::type_param shouldn't \ + ever fail if we are looking \ + at a TemplateTypeParameter", + ); + args.push(param); + } + _ => {} + } + CXChildVisit_Continue + }); + + let inner_type = match inner { + Ok(inner) => inner, + Err(..) 
=> { + warn!( + "Failed to parse template alias \ + {:?}", + location + ); + return Err(ParseError::Continue); + } + }; + + TypeKind::TemplateAlias(inner_type, args) + } + CXCursor_TemplateRef => { + let referenced = location.referenced().unwrap(); + let referenced_ty = referenced.cur_type(); + + debug!( + "TemplateRef: location = {:?}; referenced = \ + {:?}; referenced_ty = {:?}", + location, + referenced, + referenced_ty + ); + + return Self::from_clang_ty( + potential_id, + &referenced_ty, + referenced, + parent_id, + ctx, + ); + } + CXCursor_TypeRef => { + let referenced = location.referenced().unwrap(); + let referenced_ty = referenced.cur_type(); + let declaration = referenced_ty.declaration(); + + debug!( + "TypeRef: location = {:?}; referenced = \ + {:?}; referenced_ty = {:?}", + location, referenced, referenced_ty + ); + + let id = Item::from_ty_or_ref_with_id( + potential_id, + referenced_ty, + declaration, + parent_id, + ctx, + ); + return Ok(ParseResult::AlreadyResolved( + id.into(), + )); + } + CXCursor_NamespaceRef => { + return Err(ParseError::Continue); + } + _ => { + if ty.kind() == CXType_Unexposed { + warn!( + "Unexposed type {:?}, recursing inside, \ + loc: {:?}", + ty, + location + ); + return Err(ParseError::Recurse); + } + + warn!("invalid type {:?}", ty); + return Err(ParseError::Continue); + } + } + } + } + CXType_Auto => { + if canonical_ty == *ty { + debug!("Couldn't find deduced type: {:?}", ty); + return Err(ParseError::Continue); + } + + return Self::from_clang_ty( + potential_id, + &canonical_ty, + location, + parent_id, + ctx, + ); + } + // NOTE: We don't resolve pointers eagerly because the pointee type + // might not have been parsed, and if it contains templates or + // something else we might get confused, see the comment inside + // TypeRef. + // + // We might need to, though, if the context is already in the + // process of resolving them. 
+ CXType_ObjCObjectPointer | + CXType_MemberPointer | + CXType_Pointer => { + let mut pointee = ty.pointee_type().unwrap(); + if *ty != canonical_ty { + let canonical_pointee = + canonical_ty.pointee_type().unwrap(); + // clang sometimes loses pointee constness here, see + // #2244. + if canonical_pointee.is_const() != pointee.is_const() { + pointee = canonical_pointee; + } + } + let inner = + Item::from_ty_or_ref(pointee, location, None, ctx); + TypeKind::Pointer(inner) + } + CXType_BlockPointer => { + let pointee = ty.pointee_type().expect("Not valid Type?"); + let inner = + Item::from_ty_or_ref(pointee, location, None, ctx); + TypeKind::BlockPointer(inner) + } + // XXX: RValueReference is most likely wrong, but I don't think we + // can even add bindings for that, so huh. + CXType_RValueReference | CXType_LValueReference => { + let inner = Item::from_ty_or_ref( + ty.pointee_type().unwrap(), + location, + None, + ctx, + ); + TypeKind::Reference(inner) + } + // XXX DependentSizedArray is wrong + CXType_VariableArray | CXType_DependentSizedArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Pointer(inner) + } + CXType_IncompleteArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Array(inner, 0) + } + CXType_FunctionNoProto | CXType_FunctionProto => { + let signature = FunctionSig::from_ty(ty, &location, ctx)?; + TypeKind::Function(signature) + } + CXType_Typedef => { + let inner = cursor.typedef_type().expect("Not valid Type?"); + let inner_id = + Item::from_ty_or_ref(inner, location, None, ctx); + if inner_id == potential_id { + warn!( + "Generating oqaque type instead of self-referential \ + typedef"); + // This can happen if we bail out of recursive situations + // within the clang parsing. 
+ TypeKind::Opaque + } else { + // Check if this type definition is an alias to a pointer of a `struct` / + // `union` / `enum` with the same name and add the `_ptr` suffix to it to + // avoid name collisions. + if let Some(ref mut name) = name { + if inner.kind() == CXType_Pointer && + !ctx.options().c_naming + { + let pointee = inner.pointee_type().unwrap(); + if pointee.kind() == CXType_Elaborated && + pointee.declaration().spelling() == *name + { + *name += "_ptr"; + } + } + } + TypeKind::Alias(inner_id) + } + } + CXType_Enum => { + let enum_ = Enum::from_ty(ty, ctx).expect("Not an enum?"); + + if !is_anonymous { + let pretty_name = ty.spelling(); + if clang::is_valid_identifier(&pretty_name) { + name = Some(pretty_name); + } + } + + TypeKind::Enum(enum_) + } + CXType_Record => { + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ) + .expect("Not a complex type?"); + + if !is_anonymous { + // The pretty-printed name may contain typedefed name, + // but may also be "struct (anonymous at .h:1)" + let pretty_name = ty.spelling(); + if clang::is_valid_identifier(&pretty_name) { + name = Some(pretty_name); + } + } + + TypeKind::Comp(complex) + } + CXType_Vector => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + )?; + TypeKind::Vector(inner, ty.num_elements().unwrap()) + } + CXType_ConstantArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Array(inner, ty.num_elements().unwrap()) + } + CXType_Elaborated => { + return Self::from_clang_ty( + potential_id, + &ty.named(), + location, + parent_id, + ctx, + ); + } + CXType_ObjCId => TypeKind::ObjCId, + CXType_ObjCSel => TypeKind::ObjCSel, + CXType_ObjCClass | CXType_ObjCInterface => { + let interface = ObjCInterface::from_ty(&location, ctx) + .expect("Not a valid objc interface?"); + if !is_anonymous { + name = 
Some(interface.rust_name()); + } + TypeKind::ObjCInterface(interface) + } + CXType_Dependent => { + return Err(ParseError::Continue); + } + _ => { + warn!( + "unsupported type: kind = {:?}; ty = {:?}; at {:?}", + ty.kind(), + ty, + location + ); + return Err(ParseError::Continue); + } + } + }; + + name = name.filter(|n| !n.is_empty()); + + let is_const = ty.is_const() || + (ty.kind() == CXType_ConstantArray && + ty.elem_type() + .map_or(false, |element| element.is_const())); + + let ty = Type::new(name, layout, kind, is_const); + // TODO: maybe declaration.canonical()? + Ok(ParseResult::New(ty, Some(cursor.canonical()))) + } +} + +impl Trace for Type { + type Extra = Item; + + fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) + where + T: Tracer, + { + if self + .name() + .map_or(false, |name| context.is_stdint_type(name)) + { + // These types are special-cased in codegen and don't need to be traversed. + return; + } + match *self.kind() { + TypeKind::Pointer(inner) | + TypeKind::Reference(inner) | + TypeKind::Array(inner, _) | + TypeKind::Vector(inner, _) | + TypeKind::BlockPointer(inner) | + TypeKind::Alias(inner) | + TypeKind::ResolvedTypeRef(inner) => { + tracer.visit_kind(inner.into(), EdgeKind::TypeReference); + } + TypeKind::TemplateAlias(inner, ref template_params) => { + tracer.visit_kind(inner.into(), EdgeKind::TypeReference); + for param in template_params { + tracer.visit_kind( + param.into(), + EdgeKind::TemplateParameterDefinition, + ); + } + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.trace(context, tracer, &()); + } + TypeKind::Comp(ref ci) => ci.trace(context, tracer, item), + TypeKind::Function(ref sig) => sig.trace(context, tracer, &()), + TypeKind::Enum(ref en) => { + if let Some(repr) = en.repr() { + tracer.visit(repr.into()); + } + } + TypeKind::UnresolvedTypeRef(_, _, Some(id)) => { + tracer.visit(id); + } + + TypeKind::ObjCInterface(ref interface) => { + interface.trace(context, tracer, &()); + } + + // 
None of these variants have edges to other items and types. + TypeKind::Opaque | + TypeKind::UnresolvedTypeRef(_, _, None) | + TypeKind::TypeParam | + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(_) | + TypeKind::Float(_) | + TypeKind::Complex(_) | + TypeKind::ObjCId | + TypeKind::ObjCSel => {} + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/var.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/var.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/var.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/ir/var.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,488 @@ +//! Intermediate representation of variables. + +use super::super::codegen::MacroTypeVariation; +use super::context::{BindgenContext, TypeId}; +use super::dot::DotAttributes; +use super::function::cursor_mangling; +use super::int::IntKind; +use super::item::Item; +use super::ty::{FloatKind, TypeKind}; +use crate::callbacks::{ItemInfo, ItemKind, MacroParsingBehavior}; +use crate::clang; +use crate::clang::ClangToken; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; + +use std::io; +use std::num::Wrapping; + +/// The type for a constant variable. +#[derive(Debug)] +pub(crate) enum VarType { + /// A boolean. + Bool(bool), + /// An integer. + Int(i64), + /// A floating point number. + Float(f64), + /// A character. + Char(u8), + /// A string, not necessarily well-formed utf-8. + String(Vec), +} + +/// A `Var` is our intermediate representation of a variable. +#[derive(Debug)] +pub(crate) struct Var { + /// The name of the variable. + name: String, + /// The mangled name of the variable. + mangled_name: Option, + /// The link name of the variable. + link_name: Option, + /// The type of the variable. + ty: TypeId, + /// The value of the variable, that needs to be suitable for `ty`. + val: Option, + /// Whether this variable is const. 
+ is_const: bool, +} + +impl Var { + /// Construct a new `Var`. + pub(crate) fn new( + name: String, + mangled_name: Option, + link_name: Option, + ty: TypeId, + val: Option, + is_const: bool, + ) -> Var { + assert!(!name.is_empty()); + Var { + name, + mangled_name, + link_name, + ty, + val, + is_const, + } + } + + /// Is this variable `const` qualified? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } + + /// The value of this constant variable, if any. + pub(crate) fn val(&self) -> Option<&VarType> { + self.val.as_ref() + } + + /// Get this variable's type. + pub(crate) fn ty(&self) -> TypeId { + self.ty + } + + /// Get this variable's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this variable's mangled name. + pub(crate) fn mangled_name(&self) -> Option<&str> { + self.mangled_name.as_deref() + } + + /// Get this variable's link name. + pub fn link_name(&self) -> Option<&str> { + self.link_name.as_deref() + } +} + +impl DotAttributes for Var { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if self.is_const { + writeln!(out, "consttrue")?; + } + + if let Some(ref mangled) = self.mangled_name { + writeln!( + out, + "mangled name{}", + mangled + )?; + } + + Ok(()) + } +} + +fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind { + if value < 0 || + ctx.options().default_macro_constant_type == + MacroTypeVariation::Signed + { + if value < i32::min_value() as i64 || value > i32::max_value() as i64 { + IntKind::I64 + } else if !ctx.options().fit_macro_constants || + value < i16::min_value() as i64 || + value > i16::max_value() as i64 + { + IntKind::I32 + } else if value < i8::min_value() as i64 || + value > i8::max_value() as i64 + { + IntKind::I16 + } else { + IntKind::I8 + } + } else if value > u32::max_value() as i64 { + IntKind::U64 + } else if !ctx.options().fit_macro_constants || + value > u16::max_value() as i64 + { + 
IntKind::U32 + } else if value > u8::max_value() as i64 { + IntKind::U16 + } else { + IntKind::U8 + } +} + +/// Parses tokens from a CXCursor_MacroDefinition pointing into a function-like +/// macro, and calls the func_macro callback. +fn handle_function_macro( + cursor: &clang::Cursor, + callbacks: &dyn crate::callbacks::ParseCallbacks, +) { + let is_closing_paren = |t: &ClangToken| { + // Test cheap token kind before comparing exact spellings. + t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")" + }; + let tokens: Vec<_> = cursor.tokens().iter().collect(); + if let Some(boundary) = tokens.iter().position(is_closing_paren) { + let mut spelled = tokens.iter().map(ClangToken::spelling); + // Add 1, to convert index to length. + let left = spelled.by_ref().take(boundary + 1); + let left = left.collect::>().concat(); + if let Ok(left) = String::from_utf8(left) { + let right: Vec<_> = spelled.collect(); + callbacks.func_macro(&left, &right); + } + } +} + +impl ClangSubItemParser for Var { + fn parse( + cursor: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use cexpr::expr::EvalResult; + use cexpr::literal::CChar; + use clang_sys::*; + match cursor.kind() { + CXCursor_MacroDefinition => { + for callbacks in &ctx.options().parse_callbacks { + match callbacks.will_parse_macro(&cursor.spelling()) { + MacroParsingBehavior::Ignore => { + return Err(ParseError::Continue); + } + MacroParsingBehavior::Default => {} + } + + if cursor.is_macro_function_like() { + handle_function_macro(&cursor, callbacks.as_ref()); + // We handled the macro, skip macro processing below. 
+ return Err(ParseError::Continue); + } + } + + let value = parse_macro(ctx, &cursor); + + let (id, value) = match value { + Some(v) => v, + None => return Err(ParseError::Continue), + }; + + assert!(!id.is_empty(), "Empty macro name?"); + + let previously_defined = ctx.parsed_macro(&id); + + // NB: It's important to "note" the macro even if the result is + // not an integer, otherwise we might loose other kind of + // derived macros. + ctx.note_parsed_macro(id.clone(), value.clone()); + + if previously_defined { + let name = String::from_utf8(id).unwrap(); + duplicated_macro_diagnostic(&name, cursor.location(), ctx); + return Err(ParseError::Continue); + } + + // NOTE: Unwrapping, here and above, is safe, because the + // identifier of a token comes straight from clang, and we + // enforce utf8 there, so we should have already panicked at + // this point. + let name = String::from_utf8(id).unwrap(); + let (type_kind, val) = match value { + EvalResult::Invalid => return Err(ParseError::Continue), + EvalResult::Float(f) => { + (TypeKind::Float(FloatKind::Double), VarType::Float(f)) + } + EvalResult::Char(c) => { + let c = match c { + CChar::Char(c) => { + assert_eq!(c.len_utf8(), 1); + c as u8 + } + CChar::Raw(c) => { + assert!(c <= ::std::u8::MAX as u64); + c as u8 + } + }; + + (TypeKind::Int(IntKind::U8), VarType::Char(c)) + } + EvalResult::Str(val) => { + let char_ty = Item::builtin_type( + TypeKind::Int(IntKind::U8), + true, + ctx, + ); + for callbacks in &ctx.options().parse_callbacks { + callbacks.str_macro(&name, &val); + } + (TypeKind::Pointer(char_ty), VarType::String(val)) + } + EvalResult::Int(Wrapping(value)) => { + let kind = ctx + .options() + .last_callback(|c| c.int_macro(&name, value)) + .unwrap_or_else(|| { + default_macro_constant_type(ctx, value) + }); + + (TypeKind::Int(kind), VarType::Int(value)) + } + }; + + let ty = Item::builtin_type(type_kind, true, ctx); + + Ok(ParseResult::New( + Var::new(name, None, None, ty, Some(val), true), + 
Some(cursor), + )) + } + CXCursor_VarDecl => { + let mut name = cursor.spelling(); + if cursor.linkage() == CXLinkage_External { + if let Some(nm) = ctx.options().last_callback(|callbacks| { + callbacks.generated_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Var, + }) + }) { + name = nm; + } + } + // No more changes to name + let name = name; + + if name.is_empty() { + warn!("Empty constant name?"); + return Err(ParseError::Continue); + } + + let link_name = ctx.options().last_callback(|callbacks| { + callbacks.generated_link_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Var, + }) + }); + + let ty = cursor.cur_type(); + + // TODO(emilio): do we have to special-case constant arrays in + // some other places? + let is_const = ty.is_const() || + ([CXType_ConstantArray, CXType_IncompleteArray] + .contains(&ty.kind()) && + ty.elem_type() + .map_or(false, |element| element.is_const())); + + let ty = match Item::from_ty(&ty, cursor, None, ctx) { + Ok(ty) => ty, + Err(e) => { + assert!( + matches!(ty.kind(), CXType_Auto | CXType_Unexposed), + "Couldn't resolve constant type, and it \ + wasn't an nondeductible auto type or unexposed \ + type!" + ); + return Err(e); + } + }; + + // Note: Ty might not be totally resolved yet, see + // tests/headers/inner_const.hpp + // + // That's fine because in that case we know it's not a literal. + let canonical_ty = ctx + .safe_resolve_type(ty) + .and_then(|t| t.safe_canonical_type(ctx)); + + let is_integer = canonical_ty.map_or(false, |t| t.is_integer()); + let is_float = canonical_ty.map_or(false, |t| t.is_float()); + + // TODO: We could handle `char` more gracefully. + // TODO: Strings, though the lookup is a bit more hard (we need + // to look at the canonical type of the pointee too, and check + // is char, u8, or i8 I guess). 
+ let value = if is_integer { + let kind = match *canonical_ty.unwrap().kind() { + TypeKind::Int(kind) => kind, + _ => unreachable!(), + }; + + let mut val = cursor.evaluate().and_then(|v| v.as_int()); + if val.is_none() || !kind.signedness_matches(val.unwrap()) { + val = get_integer_literal_from_cursor(&cursor); + } + + val.map(|val| { + if kind == IntKind::Bool { + VarType::Bool(val != 0) + } else { + VarType::Int(val) + } + }) + } else if is_float { + cursor + .evaluate() + .and_then(|v| v.as_double()) + .map(VarType::Float) + } else { + cursor + .evaluate() + .and_then(|v| v.as_literal_string()) + .map(VarType::String) + }; + + let mangling = cursor_mangling(ctx, &cursor); + let var = + Var::new(name, mangling, link_name, ty, value, is_const); + + Ok(ParseResult::New(var, Some(cursor))) + } + _ => { + /* TODO */ + Err(ParseError::Continue) + } + } + } +} + +/// Try and parse a macro using all the macros parsed until now. +fn parse_macro( + ctx: &BindgenContext, + cursor: &clang::Cursor, +) -> Option<(Vec, cexpr::expr::EvalResult)> { + use cexpr::expr; + + let cexpr_tokens = cursor.cexpr_tokens(); + + let parser = expr::IdentifierParser::new(ctx.parsed_macros()); + + match parser.macro_definition(&cexpr_tokens) { + Ok((_, (id, val))) => Some((id.into(), val)), + _ => None, + } +} + +fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option { + use cexpr::expr; + use cexpr::expr::EvalResult; + + let cexpr_tokens = cursor.cexpr_tokens(); + + // TODO(emilio): We can try to parse other kinds of literals. 
+ match expr::expr(&cexpr_tokens) { + Ok((_, EvalResult::Int(Wrapping(val)))) => Some(val), + _ => None, + } +} + +fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option { + use clang_sys::*; + let mut value = None; + cursor.visit(|c| { + match c.kind() { + CXCursor_IntegerLiteral | CXCursor_UnaryOperator => { + value = parse_int_literal_tokens(&c); + } + CXCursor_UnexposedExpr => { + value = get_integer_literal_from_cursor(&c); + } + _ => (), + } + if value.is_some() { + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + value +} + +fn duplicated_macro_diagnostic( + macro_name: &str, + _location: crate::clang::SourceLocation, + _ctx: &BindgenContext, +) { + warn!("Duplicated macro definition: {}", macro_name); + + #[cfg(feature = "experimental")] + // FIXME (pvdrz & amanjeev): This diagnostic message shows way too often to be actually + // useful. We have to change the logic where this function is called to be able to emit this + // message only when the duplication is an actuall issue. + // + // If I understood correctly, `bindgen` ignores all `#undef` directives. Meaning that this: + // ```c + // #define FOO 1 + // #undef FOO + // #define FOO 2 + // ``` + // + // Will trigger this message even though there's nothing wrong with it. 
+ #[allow(clippy::overly_complex_bool_expr)] + if false && _ctx.options().emit_diagnostics { + use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; + use std::borrow::Cow; + + let mut slice = Slice::default(); + let mut source = Cow::from(macro_name); + + let (file, line, col, _) = _location.location(); + if let Some(filename) = file.name() { + if let Ok(Some(code)) = get_line(&filename, line) { + source = code.into(); + } + slice.with_location(filename, line, col); + } + + slice.with_source(source); + + Diagnostic::default() + .with_title("Duplicated macro definition.", Level::Warn) + .add_slice(slice) + .add_annotation("This macro had a duplicate.", Level::Note) + .display(); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/lib.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,1300 @@ +//! Generate Rust bindings for C and C++ libraries. +//! +//! Provide a C/C++ header file, receive Rust FFI code to call into C/C++ +//! functions and use types defined in the header. +//! +//! See the [`Builder`](./struct.Builder.html) struct for usage. +//! +//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for +//! additional documentation. +#![deny(missing_docs)] +#![deny(unused_extern_crates)] +#![deny(clippy::disallowed_methods)] +// To avoid rather annoying warnings when matching with CXCursor_xxx as a +// constant. +#![allow(non_upper_case_globals)] +// `quote!` nests quite deeply. 
+#![recursion_limit = "128"] + +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate lazy_static; +#[macro_use] +extern crate quote; + +#[cfg(feature = "logging")] +#[macro_use] +extern crate log; + +#[cfg(not(feature = "logging"))] +#[macro_use] +mod log_stubs; + +#[macro_use] +mod extra_assertions; + +mod codegen; +mod deps; +mod options; +mod time; + +pub mod callbacks; + +mod clang; +#[cfg(feature = "experimental")] +mod diagnostics; +mod features; +mod ir; +mod parse; +mod regex_set; + +pub use codegen::{ + AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, +}; +pub use features::{RustTarget, LATEST_STABLE_RUST, RUST_TARGET_STRINGS}; +pub use ir::annotations::FieldVisibilityKind; +pub use ir::function::Abi; +pub use regex_set::RegexSet; + +use codegen::CodegenError; +use features::RustFeatures; +use ir::comment; +use ir::context::{BindgenContext, ItemId}; +use ir::item::Item; +use options::BindgenOptions; +use parse::ParseError; + +use std::borrow::Cow; +use std::collections::hash_map::Entry; +use std::env; +use std::ffi::OsStr; +use std::fs::{File, OpenOptions}; +use std::io::{self, Write}; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; +use std::rc::Rc; +use std::str::FromStr; + +// Some convenient typedefs for a fast hash map and hash set. +type HashMap = rustc_hash::FxHashMap; +type HashSet = rustc_hash::FxHashSet; + +/// Default prefix for the anon fields. 
+pub const DEFAULT_ANON_FIELDS_PREFIX: &str = "__bindgen_anon_"; + +const DEFAULT_NON_EXTERN_FNS_SUFFIX: &str = "__extern"; + +fn file_is_cpp(name_file: &str) -> bool { + name_file.ends_with(".hpp") || + name_file.ends_with(".hxx") || + name_file.ends_with(".hh") || + name_file.ends_with(".h++") +} + +fn args_are_cpp(clang_args: &[String]) -> bool { + for w in clang_args.windows(2) { + if w[0] == "-xc++" || w[1] == "-xc++" { + return true; + } + if w[0] == "-x" && w[1] == "c++" { + return true; + } + if w[0] == "-include" && file_is_cpp(&w[1]) { + return true; + } + } + false +} + +bitflags! { + /// A type used to indicate which kind of items we have to generate. + pub struct CodegenConfig: u32 { + /// Whether to generate functions. + const FUNCTIONS = 1 << 0; + /// Whether to generate types. + const TYPES = 1 << 1; + /// Whether to generate constants. + const VARS = 1 << 2; + /// Whether to generate methods. + const METHODS = 1 << 3; + /// Whether to generate constructors + const CONSTRUCTORS = 1 << 4; + /// Whether to generate destructors. + const DESTRUCTORS = 1 << 5; + } +} + +impl CodegenConfig { + /// Returns true if functions should be generated. + pub fn functions(self) -> bool { + self.contains(CodegenConfig::FUNCTIONS) + } + + /// Returns true if types should be generated. + pub fn types(self) -> bool { + self.contains(CodegenConfig::TYPES) + } + + /// Returns true if constants should be generated. + pub fn vars(self) -> bool { + self.contains(CodegenConfig::VARS) + } + + /// Returns true if methds should be generated. + pub fn methods(self) -> bool { + self.contains(CodegenConfig::METHODS) + } + + /// Returns true if constructors should be generated. + pub fn constructors(self) -> bool { + self.contains(CodegenConfig::CONSTRUCTORS) + } + + /// Returns true if destructors should be generated. 
+ pub fn destructors(self) -> bool { + self.contains(CodegenConfig::DESTRUCTORS) + } +} + +impl Default for CodegenConfig { + fn default() -> Self { + CodegenConfig::all() + } +} + +/// Formatting tools that can be used to format the bindings +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[non_exhaustive] +pub enum Formatter { + /// Do not format the bindings. + None, + /// Use `rustfmt` to format the bindings. + Rustfmt, + #[cfg(feature = "prettyplease")] + /// Use `prettyplease` to format the bindings. + Prettyplease, +} + +impl Default for Formatter { + fn default() -> Self { + Self::Rustfmt + } +} + +impl FromStr for Formatter { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "none" => Ok(Self::None), + "rustfmt" => Ok(Self::Rustfmt), + #[cfg(feature = "prettyplease")] + "prettyplease" => Ok(Self::Prettyplease), + _ => Err(format!("`{}` is not a valid formatter", s)), + } + } +} + +impl std::fmt::Display for Formatter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + Self::None => "none", + Self::Rustfmt => "rustfmt", + #[cfg(feature = "prettyplease")] + Self::Prettyplease => "prettyplease", + }; + + s.fmt(f) + } +} + +/// Configure and generate Rust bindings for a C/C++ header. +/// +/// This is the main entry point to the library. +/// +/// ```ignore +/// use bindgen::builder; +/// +/// // Configure and generate bindings. +/// let bindings = builder().header("path/to/input/header") +/// .allowlist_type("SomeCoolClass") +/// .allowlist_function("do_some_cool_thing") +/// .generate()?; +/// +/// // Write the generated bindings to an output file. +/// bindings.write_to_file("path/to/output.rs")?; +/// ``` +/// +/// # Enums +/// +/// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on +/// the pattern passed to several methods: +/// +/// 1. [`constified_enum_module()`](#method.constified_enum_module) +/// 2. 
[`bitfield_enum()`](#method.bitfield_enum) +/// 3. [`newtype_enum()`](#method.newtype_enum) +/// 4. [`rustified_enum()`](#method.rustified_enum) +/// +/// For each C enum, bindgen tries to match the pattern in the following order: +/// +/// 1. Constified enum module +/// 2. Bitfield enum +/// 3. Newtype enum +/// 4. Rustified enum +/// +/// If none of the above patterns match, then bindgen will generate a set of Rust constants. +/// +/// # Clang arguments +/// +/// Extra arguments can be passed to with clang: +/// 1. [`clang_arg()`](#method.clang_arg): takes a single argument +/// 2. [`clang_args()`](#method.clang_args): takes an iterator of arguments +/// 3. `BINDGEN_EXTRA_CLANG_ARGS` environment variable: whitespace separate +/// environment variable of arguments +/// +/// Clang arguments specific to your crate should be added via the +/// `clang_arg()`/`clang_args()` methods. +/// +/// End-users of the crate may need to set the `BINDGEN_EXTRA_CLANG_ARGS` environment variable to +/// add additional arguments. For example, to build against a different sysroot a user could set +/// `BINDGEN_EXTRA_CLANG_ARGS` to `--sysroot=/path/to/sysroot`. +/// +/// # Regular expression arguments +/// +/// Some [`Builder`] methods such as the `allowlist_*` and `blocklist_*` methods allow regular +/// expressions as arguments. These regular expressions will be enclosed in parentheses and +/// anchored with `^` and `$`. So if the argument passed is ``, the regular expression to be +/// stored will be `^()$`. +/// +/// As a consequence, regular expressions passed to `bindgen` will try to match the whole name of +/// an item instead of a section of it, which means that to match any items with the prefix +/// `prefix`, the `prefix.*` regular expression must be used. +/// +/// Certain methods, like [`Builder::allowlist_function`], use regular expressions over function +/// names. To match C++ methods, prefix the name of the type where they belong followed by an +/// underscore. 
So if the type `Foo` has a method `bar`, it can be matched with the `Foo_bar` +/// regular expression. +/// +/// Additionally, Objective-C interfaces can be matched by prefixing the regular expression with +/// `I`. For example, the `IFoo` regular expression matches the `Foo` interface and the `IFoo_foo` +/// regular expression matches the `foo` method of the `Foo` interface. +/// +/// Releases of `bindgen` with a version lesser or equal to `0.62.0` used to accept the wildcard +/// pattern `*` as a valid regular expression. This behavior has been deprecated and the `.*` +/// regular expression must be used instead. +#[derive(Debug, Default, Clone)] +pub struct Builder { + options: BindgenOptions, +} + +/// Construct a new [`Builder`](./struct.Builder.html). +pub fn builder() -> Builder { + Default::default() +} + +fn get_extra_clang_args( + parse_callbacks: &[Rc], +) -> Vec { + // Add any extra arguments from the environment to the clang command line. + let extra_clang_args = match get_target_dependent_env_var( + parse_callbacks, + "BINDGEN_EXTRA_CLANG_ARGS", + ) { + None => return vec![], + Some(s) => s, + }; + + // Try to parse it with shell quoting. If we fail, make it one single big argument. + if let Some(strings) = shlex::split(&extra_clang_args) { + return strings; + } + vec![extra_clang_args] +} + +impl Builder { + /// Generate the Rust bindings using the options built up thus far. + pub fn generate(mut self) -> Result { + // Add any extra arguments from the environment to the clang command line. + self.options + .clang_args + .extend(get_extra_clang_args(&self.options.parse_callbacks)); + + // Transform input headers to arguments on the clang command line. 
+ self.options.clang_args.extend( + self.options.input_headers + [..self.options.input_headers.len().saturating_sub(1)] + .iter() + .flat_map(|header| ["-include".into(), header.to_string()]), + ); + + let input_unsaved_files = + std::mem::take(&mut self.options.input_header_contents) + .into_iter() + .map(|(name, contents)| clang::UnsavedFile::new(name, contents)) + .collect::>(); + + Bindings::generate(self.options, input_unsaved_files) + } + + /// Preprocess and dump the input header files to disk. + /// + /// This is useful when debugging bindgen, using C-Reduce, or when filing + /// issues. The resulting file will be named something like `__bindgen.i` or + /// `__bindgen.ii` + pub fn dump_preprocessed_input(&self) -> io::Result<()> { + let clang = + clang_sys::support::Clang::find(None, &[]).ok_or_else(|| { + io::Error::new( + io::ErrorKind::Other, + "Cannot find clang executable", + ) + })?; + + // The contents of a wrapper file that includes all the input header + // files. + let mut wrapper_contents = String::new(); + + // Whether we are working with C or C++ inputs. + let mut is_cpp = args_are_cpp(&self.options.clang_args); + + // For each input header, add `#include "$header"`. + for header in &self.options.input_headers { + is_cpp |= file_is_cpp(header); + + wrapper_contents.push_str("#include \""); + wrapper_contents.push_str(header); + wrapper_contents.push_str("\"\n"); + } + + // For each input header content, add a prefix line of `#line 0 "$name"` + // followed by the contents. 
+ for (name, contents) in &self.options.input_header_contents { + is_cpp |= file_is_cpp(name); + + wrapper_contents.push_str("#line 0 \""); + wrapper_contents.push_str(name); + wrapper_contents.push_str("\"\n"); + wrapper_contents.push_str(contents); + } + + let wrapper_path = PathBuf::from(if is_cpp { + "__bindgen.cpp" + } else { + "__bindgen.c" + }); + + { + let mut wrapper_file = File::create(&wrapper_path)?; + wrapper_file.write_all(wrapper_contents.as_bytes())?; + } + + let mut cmd = Command::new(clang.path); + cmd.arg("-save-temps") + .arg("-E") + .arg("-C") + .arg("-c") + .arg(&wrapper_path) + .stdout(Stdio::piped()); + + for a in &self.options.clang_args { + cmd.arg(a); + } + + for a in get_extra_clang_args(&self.options.parse_callbacks) { + cmd.arg(a); + } + + let mut child = cmd.spawn()?; + + let mut preprocessed = child.stdout.take().unwrap(); + let mut file = File::create(if is_cpp { + "__bindgen.ii" + } else { + "__bindgen.i" + })?; + io::copy(&mut preprocessed, &mut file)?; + + if child.wait()?.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "clang exited with non-zero status", + )) + } + } +} + +impl BindgenOptions { + fn build(&mut self) { + const REGEX_SETS_LEN: usize = 27; + + let regex_sets: [_; REGEX_SETS_LEN] = [ + &mut self.allowlisted_vars, + &mut self.allowlisted_types, + &mut self.allowlisted_functions, + &mut self.allowlisted_files, + &mut self.blocklisted_types, + &mut self.blocklisted_functions, + &mut self.blocklisted_items, + &mut self.blocklisted_files, + &mut self.opaque_types, + &mut self.bitfield_enums, + &mut self.constified_enums, + &mut self.constified_enum_modules, + &mut self.newtype_enums, + &mut self.newtype_global_enums, + &mut self.rustified_enums, + &mut self.rustified_non_exhaustive_enums, + &mut self.type_alias, + &mut self.new_type_alias, + &mut self.new_type_alias_deref, + &mut self.bindgen_wrapper_union, + &mut self.manually_drop_union, + &mut self.no_partialeq_types, + &mut 
self.no_copy_types, + &mut self.no_debug_types, + &mut self.no_default_types, + &mut self.no_hash_types, + &mut self.must_use_types, + ]; + + let record_matches = self.record_matches; + #[cfg(feature = "experimental")] + { + let sets_len = REGEX_SETS_LEN + self.abi_overrides.len(); + let names = if self.emit_diagnostics { + <[&str; REGEX_SETS_LEN]>::into_iter([ + "--blocklist-type", + "--blocklist-function", + "--blocklist-item", + "--blocklist-file", + "--opaque-type", + "--allowlist-type", + "--allowlist-function", + "--allowlist-var", + "--allowlist-file", + "--bitfield-enum", + "--newtype-enum", + "--newtype-global-enum", + "--rustified-enum", + "--rustified-enum-non-exhaustive", + "--constified-enum-module", + "--constified-enum", + "--type-alias", + "--new-type-alias", + "--new-type-alias-deref", + "--bindgen-wrapper-union", + "--manually-drop-union", + "--no-partialeq", + "--no-copy", + "--no-debug", + "--no-default", + "--no-hash", + "--must-use", + ]) + .chain((0..self.abi_overrides.len()).map(|_| "--override-abi")) + .map(Some) + .collect() + } else { + vec![None; sets_len] + }; + + for (regex_set, name) in + self.abi_overrides.values_mut().chain(regex_sets).zip(names) + { + regex_set.build_with_diagnostics(record_matches, name); + } + } + #[cfg(not(feature = "experimental"))] + for regex_set in self.abi_overrides.values_mut().chain(regex_sets) { + regex_set.build(record_matches); + } + + let rust_target = self.rust_target; + #[allow(deprecated)] + if rust_target <= RustTarget::Stable_1_30 { + deprecated_target_diagnostic(rust_target, self); + } + + // Disable `untagged_union` if the target does not support it. 
+ if !self.rust_features.untagged_union { + self.untagged_union = false; + } + } + + /// Update rust target version + pub fn set_rust_target(&mut self, rust_target: RustTarget) { + self.rust_target = rust_target; + + // Keep rust_features synced with rust_target + self.rust_features = rust_target.into(); + } + + /// Get features supported by target Rust version + pub fn rust_features(&self) -> RustFeatures { + self.rust_features + } + + fn last_callback( + &self, + f: impl Fn(&dyn callbacks::ParseCallbacks) -> Option, + ) -> Option { + self.parse_callbacks + .iter() + .filter_map(|cb| f(cb.as_ref())) + .last() + } + + fn all_callbacks( + &self, + f: impl Fn(&dyn callbacks::ParseCallbacks) -> Vec, + ) -> Vec { + self.parse_callbacks + .iter() + .flat_map(|cb| f(cb.as_ref())) + .collect() + } + + fn process_comment(&self, comment: &str) -> String { + let comment = comment::preprocess(comment); + self.parse_callbacks + .last() + .and_then(|cb| cb.process_comment(&comment)) + .unwrap_or(comment) + } +} + +fn deprecated_target_diagnostic(target: RustTarget, _options: &BindgenOptions) { + let target = String::from(target); + warn!("The {} Rust target is deprecated. 
If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues", target,); + + #[cfg(feature = "experimental")] + if _options.emit_diagnostics { + use crate::diagnostics::{Diagnostic, Level}; + + let mut diagnostic = Diagnostic::default(); + diagnostic.with_title( + format!("The {} Rust target is deprecated.", target), + Level::Warn, + ); + diagnostic.add_annotation( + "This Rust target was passed to `--rust-target`", + Level::Info, + ); + diagnostic.add_annotation("If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues", Level::Help); + diagnostic.display(); + } +} + +#[cfg(feature = "runtime")] +fn ensure_libclang_is_loaded() { + if clang_sys::is_loaded() { + return; + } + + // XXX (issue #350): Ensure that our dynamically loaded `libclang` + // doesn't get dropped prematurely, nor is loaded multiple times + // across different threads. + + lazy_static! { + static ref LIBCLANG: std::sync::Arc = { + clang_sys::load().expect("Unable to find libclang"); + clang_sys::get_library().expect( + "We just loaded libclang and it had better still be \ + here!", + ) + }; + } + + clang_sys::set_library(Some(LIBCLANG.clone())); +} + +#[cfg(not(feature = "runtime"))] +fn ensure_libclang_is_loaded() {} + +/// Error type for rust-bindgen. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum BindgenError { + /// The header was a folder. + FolderAsHeader(PathBuf), + /// Permissions to read the header is insufficient. + InsufficientPermissions(PathBuf), + /// The header does not exist. + NotExist(PathBuf), + /// Clang diagnosed an error. + ClangDiagnostic(String), + /// Code generation reported an error. 
+ Codegen(CodegenError), +} + +impl std::fmt::Display for BindgenError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BindgenError::FolderAsHeader(h) => { + write!(f, "'{}' is a folder", h.display()) + } + BindgenError::InsufficientPermissions(h) => { + write!(f, "insufficient permissions to read '{}'", h.display()) + } + BindgenError::NotExist(h) => { + write!(f, "header '{}' does not exist.", h.display()) + } + BindgenError::ClangDiagnostic(message) => { + write!(f, "clang diagnosed error: {}", message) + } + BindgenError::Codegen(err) => { + write!(f, "codegen error: {}", err) + } + } + } +} + +impl std::error::Error for BindgenError {} + +/// Generated Rust bindings. +#[derive(Debug)] +pub struct Bindings { + options: BindgenOptions, + module: proc_macro2::TokenStream, +} + +pub(crate) const HOST_TARGET: &str = + include_str!(concat!(env!("OUT_DIR"), "/host-target.txt")); + +// Some architecture triplets are different between rust and libclang, see #1211 +// and duplicates. +fn rust_to_clang_target(rust_target: &str) -> String { + if rust_target.starts_with("aarch64-apple-") { + let mut clang_target = "arm64-apple-".to_owned(); + clang_target + .push_str(rust_target.strip_prefix("aarch64-apple-").unwrap()); + return clang_target; + } else if rust_target.starts_with("riscv64gc-") { + let mut clang_target = "riscv64-".to_owned(); + clang_target.push_str(rust_target.strip_prefix("riscv64gc-").unwrap()); + return clang_target; + } else if rust_target.ends_with("-espidf") { + let mut clang_target = + rust_target.strip_suffix("-espidf").unwrap().to_owned(); + clang_target.push_str("-elf"); + if clang_target.starts_with("riscv32imc-") { + clang_target = "riscv32-".to_owned() + + clang_target.strip_prefix("riscv32imc-").unwrap(); + } + return clang_target; + } + rust_target.to_owned() +} + +/// Returns the effective target, and whether it was explicitly specified on the +/// clang flags. 
+fn find_effective_target(clang_args: &[String]) -> (String, bool) { + let mut args = clang_args.iter(); + while let Some(opt) = args.next() { + if opt.starts_with("--target=") { + let mut split = opt.split('='); + split.next(); + return (split.next().unwrap().to_owned(), true); + } + + if opt == "-target" { + if let Some(target) = args.next() { + return (target.clone(), true); + } + } + } + + // If we're running from a build script, try to find the cargo target. + if let Ok(t) = env::var("TARGET") { + return (rust_to_clang_target(&t), false); + } + + (rust_to_clang_target(HOST_TARGET), false) +} + +impl Bindings { + /// Generate bindings for the given options. + pub(crate) fn generate( + mut options: BindgenOptions, + input_unsaved_files: Vec, + ) -> Result { + ensure_libclang_is_loaded(); + + #[cfg(feature = "runtime")] + debug!( + "Generating bindings, libclang at {}", + clang_sys::get_library().unwrap().path().display() + ); + #[cfg(not(feature = "runtime"))] + debug!("Generating bindings, libclang linked"); + + options.build(); + + let (effective_target, explicit_target) = + find_effective_target(&options.clang_args); + + let is_host_build = + rust_to_clang_target(HOST_TARGET) == effective_target; + + // NOTE: The is_host_build check wouldn't be sound normally in some + // cases if we were to call a binary (if you have a 32-bit clang and are + // building on a 64-bit system for example). But since we rely on + // opening libclang.so, it has to be the same architecture and thus the + // check is fine. + if !explicit_target && !is_host_build { + options + .clang_args + .insert(0, format!("--target={}", effective_target)); + }; + + fn detect_include_paths(options: &mut BindgenOptions) { + if !options.detect_include_paths { + return; + } + + // Filter out include paths and similar stuff, so we don't incorrectly + // promote them to `-isystem`. 
+ let clang_args_for_clang_sys = { + let mut last_was_include_prefix = false; + options + .clang_args + .iter() + .filter(|arg| { + if last_was_include_prefix { + last_was_include_prefix = false; + return false; + } + + let arg = &**arg; + + // https://clang.llvm.org/docs/ClangCommandLineReference.html + // -isystem and -isystem-after are harmless. + if arg == "-I" || arg == "--include-directory" { + last_was_include_prefix = true; + return false; + } + + if arg.starts_with("-I") || + arg.starts_with("--include-directory=") + { + return false; + } + + true + }) + .cloned() + .collect::>() + }; + + debug!( + "Trying to find clang with flags: {:?}", + clang_args_for_clang_sys + ); + + let clang = match clang_sys::support::Clang::find( + None, + &clang_args_for_clang_sys, + ) { + None => return, + Some(clang) => clang, + }; + + debug!("Found clang: {:?}", clang); + + // Whether we are working with C or C++ inputs. + let is_cpp = args_are_cpp(&options.clang_args) || + options.input_headers.iter().any(|h| file_is_cpp(h)); + + let search_paths = if is_cpp { + clang.cpp_search_paths + } else { + clang.c_search_paths + }; + + if let Some(search_paths) = search_paths { + for path in search_paths.into_iter() { + if let Ok(path) = path.into_os_string().into_string() { + options.clang_args.push("-isystem".to_owned()); + options.clang_args.push(path); + } + } + } + } + + detect_include_paths(&mut options); + + #[cfg(unix)] + fn can_read(perms: &std::fs::Permissions) -> bool { + use std::os::unix::fs::PermissionsExt; + perms.mode() & 0o444 > 0 + } + + #[cfg(not(unix))] + fn can_read(_: &std::fs::Permissions) -> bool { + true + } + + if let Some(h) = options.input_headers.last() { + let path = Path::new(h); + if let Ok(md) = std::fs::metadata(path) { + if md.is_dir() { + return Err(BindgenError::FolderAsHeader(path.into())); + } + if !can_read(&md.permissions()) { + return Err(BindgenError::InsufficientPermissions( + path.into(), + )); + } + let h = h.clone(); + 
options.clang_args.push(h); + } else { + return Err(BindgenError::NotExist(path.into())); + } + } + + for (idx, f) in input_unsaved_files.iter().enumerate() { + if idx != 0 || !options.input_headers.is_empty() { + options.clang_args.push("-include".to_owned()); + } + options.clang_args.push(f.name.to_str().unwrap().to_owned()) + } + + debug!("Fixed-up options: {:?}", options); + + let time_phases = options.time_phases; + let mut context = BindgenContext::new(options, &input_unsaved_files); + + if is_host_build { + debug_assert_eq!( + context.target_pointer_size(), + std::mem::size_of::<*mut ()>(), + "{:?} {:?}", + effective_target, + HOST_TARGET + ); + } + + { + let _t = time::Timer::new("parse").with_output(time_phases); + parse(&mut context)?; + } + + let (module, options) = + codegen::codegen(context).map_err(BindgenError::Codegen)?; + + Ok(Bindings { options, module }) + } + + /// Write these bindings as source text to a file. + pub fn write_to_file>(&self, path: P) -> io::Result<()> { + let file = OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(path.as_ref())?; + self.write(Box::new(file))?; + Ok(()) + } + + /// Write these bindings as source text to the given `Write`able. 
+ pub fn write<'a>(&self, mut writer: Box) -> io::Result<()> { + if !self.options.disable_header_comment { + let version = option_env!("CARGO_PKG_VERSION"); + let header = format!( + "/* automatically generated by rust-bindgen {} */\n\n", + version.unwrap_or("(unknown version)") + ); + writer.write_all(header.as_bytes())?; + } + + for line in self.options.raw_lines.iter() { + writer.write_all(line.as_bytes())?; + writer.write_all("\n".as_bytes())?; + } + + if !self.options.raw_lines.is_empty() { + writer.write_all("\n".as_bytes())?; + } + + match self.format_tokens(&self.module) { + Ok(formatted_bindings) => { + writer.write_all(formatted_bindings.as_bytes())?; + } + Err(err) => { + eprintln!( + "Failed to run rustfmt: {} (non-fatal, continuing)", + err + ); + writer.write_all(self.module.to_string().as_bytes())?; + } + } + Ok(()) + } + + /// Gets the rustfmt path to rustfmt the generated bindings. + fn rustfmt_path(&self) -> io::Result> { + debug_assert!(matches!(self.options.formatter, Formatter::Rustfmt)); + if let Some(ref p) = self.options.rustfmt_path { + return Ok(Cow::Borrowed(p)); + } + if let Ok(rustfmt) = env::var("RUSTFMT") { + return Ok(Cow::Owned(rustfmt.into())); + } + #[cfg(feature = "which-rustfmt")] + match which::which("rustfmt") { + Ok(p) => Ok(Cow::Owned(p)), + Err(e) => { + Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))) + } + } + #[cfg(not(feature = "which-rustfmt"))] + // No rustfmt binary was specified, so assume that the binary is called + // "rustfmt" and that it is in the user's PATH. + Ok(Cow::Owned("rustfmt".into())) + } + + /// Formats a token stream with the formatter set up in `BindgenOptions`. 
+ fn format_tokens( + &self, + tokens: &proc_macro2::TokenStream, + ) -> io::Result { + let _t = time::Timer::new("rustfmt_generated_string") + .with_output(self.options.time_phases); + + match self.options.formatter { + Formatter::None => return Ok(tokens.to_string()), + + #[cfg(feature = "prettyplease")] + Formatter::Prettyplease => { + return Ok(prettyplease::unparse(&syn::parse_quote!(#tokens))); + } + Formatter::Rustfmt => (), + } + + let rustfmt = self.rustfmt_path()?; + let mut cmd = Command::new(&*rustfmt); + + cmd.stdin(Stdio::piped()).stdout(Stdio::piped()); + + if let Some(path) = self + .options + .rustfmt_configuration_file + .as_ref() + .and_then(|f| f.to_str()) + { + cmd.args(["--config-path", path]); + } + + let mut child = cmd.spawn()?; + let mut child_stdin = child.stdin.take().unwrap(); + let mut child_stdout = child.stdout.take().unwrap(); + + let source = tokens.to_string(); + + // Write to stdin in a new thread, so that we can read from stdout on this + // thread. This keeps the child from blocking on writing to its stdout which + // might block us from writing to its stdin. 
+ let stdin_handle = ::std::thread::spawn(move || { + let _ = child_stdin.write_all(source.as_bytes()); + source + }); + + let mut output = vec![]; + io::copy(&mut child_stdout, &mut output)?; + + let status = child.wait()?; + let source = stdin_handle.join().expect( + "The thread writing to rustfmt's stdin doesn't do \ + anything that could panic", + ); + + match String::from_utf8(output) { + Ok(bindings) => match status.code() { + Some(0) => Ok(bindings), + Some(2) => Err(io::Error::new( + io::ErrorKind::Other, + "Rustfmt parsing errors.".to_string(), + )), + Some(3) => { + rustfmt_non_fatal_error_diagnostic( + "Rustfmt could not format some lines", + &self.options, + ); + Ok(bindings) + } + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Internal rustfmt error".to_string(), + )), + }, + _ => Ok(source), + } + } +} + +fn rustfmt_non_fatal_error_diagnostic(msg: &str, _options: &BindgenOptions) { + warn!("{}", msg); + + #[cfg(feature = "experimental")] + if _options.emit_diagnostics { + use crate::diagnostics::{Diagnostic, Level}; + + Diagnostic::default() + .with_title(msg, Level::Warn) + .add_annotation( + "The bindings will be generated but not formatted.", + Level::Note, + ) + .display(); + } +} + +impl std::fmt::Display for Bindings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut bytes = vec![]; + self.write(Box::new(&mut bytes) as Box) + .expect("writing to a vec cannot fail"); + f.write_str( + std::str::from_utf8(&bytes) + .expect("we should only write bindings that are valid utf-8"), + ) + } +} + +/// Determines whether the given cursor is in any of the files matched by the +/// options. +fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool { + ctx.options().builtins || !cursor.is_builtin() +} + +/// Parse one `Item` from the Clang cursor. 
+fn parse_one( + ctx: &mut BindgenContext, + cursor: clang::Cursor, + parent: Option, +) -> clang_sys::CXChildVisitResult { + if !filter_builtins(ctx, &cursor) { + return CXChildVisit_Continue; + } + + use clang_sys::CXChildVisit_Continue; + match Item::parse(cursor, parent, ctx) { + Ok(..) => {} + Err(ParseError::Continue) => {} + Err(ParseError::Recurse) => { + cursor.visit(|child| parse_one(ctx, child, parent)); + } + } + CXChildVisit_Continue +} + +/// Parse the Clang AST into our `Item` internal representation. +fn parse(context: &mut BindgenContext) -> Result<(), BindgenError> { + use clang_sys::*; + + let mut error = None; + for d in context.translation_unit().diags().iter() { + let msg = d.format(); + let is_err = d.severity() >= CXDiagnostic_Error; + if is_err { + let error = error.get_or_insert_with(String::new); + error.push_str(&msg); + error.push('\n'); + } else { + eprintln!("clang diag: {}", msg); + } + } + + if let Some(message) = error { + return Err(BindgenError::ClangDiagnostic(message)); + } + + let cursor = context.translation_unit().cursor(); + + if context.options().emit_ast { + fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult { + if !cur.is_builtin() { + clang::ast_dump(cur, 0) + } else { + CXChildVisit_Continue + } + } + cursor.visit(|cur| dump_if_not_builtin(&cur)); + } + + let root = context.root_module(); + context.with_module(root, |context| { + cursor.visit(|cursor| parse_one(context, cursor, None)) + }); + + assert!( + context.current_module() == context.root_module(), + "How did this happen?" 
+ ); + Ok(()) +} + +/// Extracted Clang version data +#[derive(Debug)] +pub struct ClangVersion { + /// Major and minor semver, if parsing was successful + pub parsed: Option<(u32, u32)>, + /// full version string + pub full: String, +} + +/// Get the major and the minor semver numbers of Clang's version +pub fn clang_version() -> ClangVersion { + ensure_libclang_is_loaded(); + + //Debian clang version 11.0.1-2 + let raw_v: String = clang::extract_clang_version(); + let split_v: Option> = raw_v + .split_whitespace() + .find(|t| t.chars().next().map_or(false, |v| v.is_ascii_digit())) + .map(|v| v.split('.').collect()); + if let Some(v) = split_v { + if v.len() >= 2 { + let maybe_major = v[0].parse::(); + let maybe_minor = v[1].parse::(); + if let (Ok(major), Ok(minor)) = (maybe_major, maybe_minor) { + return ClangVersion { + parsed: Some((major, minor)), + full: raw_v.clone(), + }; + } + } + }; + ClangVersion { + parsed: None, + full: raw_v.clone(), + } +} + +fn env_var + AsRef>( + parse_callbacks: &[Rc], + key: K, +) -> Result { + for callback in parse_callbacks { + callback.read_env_var(key.as_ref()); + } + std::env::var(key) +} + +/// Looks for the env var `var_${TARGET}`, and falls back to just `var` when it is not found. 
+fn get_target_dependent_env_var( + parse_callbacks: &[Rc], + var: &str, +) -> Option { + if let Ok(target) = env_var(parse_callbacks, "TARGET") { + if let Ok(v) = env_var(parse_callbacks, format!("{}_{}", var, target)) { + return Some(v); + } + if let Ok(v) = env_var( + parse_callbacks, + format!("{}_{}", var, target.replace('-', "_")), + ) { + return Some(v); + } + } + + env_var(parse_callbacks, var).ok() +} + +/// A ParseCallbacks implementation that will act on file includes by echoing a rerun-if-changed +/// line and on env variable usage by echoing a rerun-if-env-changed line +/// +/// When running inside a `build.rs` script, this can be used to make cargo invalidate the +/// generated bindings whenever any of the files included from the header change: +/// ``` +/// use bindgen::builder; +/// let bindings = builder() +/// .header("path/to/input/header") +/// .parse_callbacks(Box::new(bindgen::CargoCallbacks)) +/// .generate(); +/// ``` +#[derive(Debug)] +pub struct CargoCallbacks; + +impl callbacks::ParseCallbacks for CargoCallbacks { + fn include_file(&self, filename: &str) { + println!("cargo:rerun-if-changed={}", filename); + } + + fn read_env_var(&self, key: &str) { + println!("cargo:rerun-if-env-changed={}", key); + } +} + +/// Test command_line_flag function. 
+#[test] +fn commandline_flag_unit_test_function() { + //Test 1 + let bindings = crate::builder(); + let command_line_flags = bindings.command_line_flags(); + + let test_cases = vec![ + "--rust-target", + "--no-derive-default", + "--generate", + "functions,types,vars,methods,constructors,destructors", + ] + .iter() + .map(|&x| x.into()) + .collect::>(); + + assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); + + //Test 2 + let bindings = crate::builder() + .header("input_header") + .allowlist_type("Distinct_Type") + .allowlist_function("safe_function"); + + let command_line_flags = bindings.command_line_flags(); + let test_cases = vec![ + "--rust-target", + "input_header", + "--no-derive-default", + "--generate", + "functions,types,vars,methods,constructors,destructors", + "--allowlist-type", + "Distinct_Type", + "--allowlist-function", + "safe_function", + ] + .iter() + .map(|&x| x.into()) + .collect::>(); + println!("{:?}", command_line_flags); + + assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); +} + +#[test] +fn test_rust_to_clang_target() { + assert_eq!(rust_to_clang_target("aarch64-apple-ios"), "arm64-apple-ios"); +} + +#[test] +fn test_rust_to_clang_target_riscv() { + assert_eq!( + rust_to_clang_target("riscv64gc-unknown-linux-gnu"), + "riscv64-unknown-linux-gnu" + ) +} + +#[test] +fn test_rust_to_clang_target_espidf() { + assert_eq!( + rust_to_clang_target("riscv32imc-esp-espidf"), + "riscv32-esp-elf" + ); + assert_eq!( + rust_to_clang_target("xtensa-esp32-espidf"), + "xtensa-esp32-elf" + ); +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/log_stubs.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/log_stubs.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/log_stubs.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/log_stubs.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,32 @@ +#![allow(unused)] + +macro_rules! 
log { + (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{ + let _ = $target; + let _ = log!($lvl, $($arg)+); + }}; + ($lvl:expr, $($arg:tt)+) => {{ + let _ = $lvl; + let _ = format_args!($($arg)+); + }}; +} +macro_rules! error { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +macro_rules! warn { + (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) }; + ($($arg:tt)*) => { log!("", $($arg)*) }; +} +macro_rules! info { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +macro_rules! debug { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +macro_rules! trace { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/as_args.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/as_args.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/as_args.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/as_args.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,52 @@ +use std::path::PathBuf; + +use crate::RegexSet; + +/// Trait used to turn [`crate::BindgenOptions`] fields into CLI args. +pub(super) trait AsArgs { + fn as_args(&self, args: &mut Vec, flag: &str); +} + +/// If the `bool` is `true`, `flag` is pushed into `args`. +/// +/// be careful about the truth value of the field as some options, like `--no-layout-tests`, are +/// actually negations of the fields. 
+impl AsArgs for bool { + fn as_args(&self, args: &mut Vec, flag: &str) { + if *self { + args.push(flag.to_string()); + } + } +} + +/// Iterate over all the items of the `RegexSet` and push `flag` followed by the item into `args` +/// for each item. +impl AsArgs for RegexSet { + fn as_args(&self, args: &mut Vec, flag: &str) { + for item in self.get_items() { + args.extend_from_slice(&[flag.to_owned(), item.clone()]); + } + } +} + +/// If the `Option` is `Some(value)`, push `flag` followed by `value`. +impl AsArgs for Option { + fn as_args(&self, args: &mut Vec, flag: &str) { + if let Some(string) = self { + args.extend_from_slice(&[flag.to_owned(), string.clone()]); + } + } +} + +/// If the `Option` is `Some(path)`, push `flag` followed by the [`std::path::Path::display`] +/// representation of `path`. +impl AsArgs for Option { + fn as_args(&self, args: &mut Vec, flag: &str) { + if let Some(path) = self { + args.extend_from_slice(&[ + flag.to_owned(), + path.display().to_string(), + ]); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/helpers.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/helpers.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/helpers.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/helpers.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,43 @@ +/// Helper function that appends extra documentation to [`crate::Builder`] methods that support regular +/// expressions in their input. +macro_rules! regex_option { + ($(#[$attrs:meta])* pub fn $($tokens:tt)*) => { + $(#[$attrs])* + /// + /// Regular expressions are supported. Check the [regular expression + /// arguments](./struct.Builder.html#regular-expression-arguments) section and the + /// [regex](https://docs.rs/regex) crate documentation for further information. + pub fn $($tokens)* + }; +} + +/// Helper macro to set the default value of each option. 
+/// +/// This macro is an internal implementation detail of the `options` macro and should not be used +/// directly. +macro_rules! default { + () => { + Default::default() + }; + ($expr:expr) => { + $expr + }; +} + +/// Helper macro to set the conversion to CLI arguments for each option. +/// +/// This macro is an internal implementation detail of the `options` macro and should not be used +/// directly. +macro_rules! as_args { + ($flag:literal) => { + |field, args| AsArgs::as_args(field, args, $flag) + }; + ($expr:expr) => { + $expr + }; +} + +/// Helper function to ignore an option when converting it into CLI arguments. +/// +/// This function is only used inside `options` and should not be used in other contexts. +pub(super) fn ignore(_: &T, _: &mut Vec) {} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/options/mod.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,2008 @@ +//! Declarations and setter methods for `bindgen` options. +//! +//! The main entry point of this module is the `options` macro. 
+#[macro_use] +mod helpers; +mod as_args; + +use crate::callbacks::ParseCallbacks; +use crate::codegen::{ + AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, +}; +use crate::deps::DepfileSpec; +use crate::features::{RustFeatures, RustTarget}; +use crate::regex_set::RegexSet; +use crate::Abi; +use crate::Builder; +use crate::CodegenConfig; +use crate::FieldVisibilityKind; +use crate::Formatter; +use crate::HashMap; +use crate::DEFAULT_ANON_FIELDS_PREFIX; + +use std::env; +#[cfg(feature = "experimental")] +use std::path::Path; +use std::path::PathBuf; +use std::rc::Rc; + +use as_args::AsArgs; +use helpers::ignore; + +/// Macro used to generate the [`BindgenOptions`] type and the [`Builder`] setter methods for each +/// one of the fields of `BindgenOptions`. +/// +/// The input format of this macro resembles a `struct` pattern. Each field of the `BindgenOptions` +/// type is declared by adding the name of the field and its type using the `name: type` syntax and +/// a block of code with the following items: +/// +/// - `default`: The default value for the field. If this item is omitted, `Default::default()` is +/// used instead, meaning that the type of the field must implement `Default`. +/// - `methods`: A block of code containing methods for the `Builder` type. These methods should be +/// related to the field being declared. +/// - `as_args`: This item declares how the field should be converted into a valid CLI argument for +/// `bindgen` and is used in the [`Builder::command_line_flags`] method which is used to do a +/// roundtrip test of the CLI args in the `bindgen-test` crate. This item can take one of the +/// following: +/// - A string literal with the flag if the type of the field implements the [`AsArgs`] trait. +/// - A closure with the signature `|field, args: &mut Vec| -> ()` that pushes arguments +/// into the `args` buffer based on the value of the field. 
This is used if the field does not +/// implement `AsArgs` or if the implementation of the trait is not logically correct for the +/// option and a custom behavior must be taken into account. +/// - The `ignore` literal, which does not emit any CLI arguments for this field. This is useful +/// if the field cannot be used from the `bindgen` CLI. +/// +/// As an example, this would be the declaration of a `bool` field called `be_fun` whose default +/// value is `false` (the `Default` value for `bool`): +/// ```rust,ignore +/// be_fun: bool { +/// methods: { +/// /// Ask `bindgen` to be fun. This option is disabled by default. +/// fn be_fun(mut self) -> Self { +/// self.options.be_fun = true; +/// self +/// } +/// }, +/// as_args: "--be-fun", +/// } +/// ``` +/// +/// However, we could also set the `be_fun` field to `true` by default and use a `--not-fun` flag +/// instead. This means that we have to add the `default` item and use a closure in the `as_args` +/// item: +/// ```rust,ignore +/// be_fun: bool { +/// default: true, +/// methods: { +/// /// Ask `bindgen` to not be fun. `bindgen` is fun by default. +/// fn not_fun(mut self) -> Self { +/// self.options.be_fun = false; +/// self +/// } +/// }, +/// as_args: |be_fun, args| (!be_fun).as_args(args, "--not-fun"), +/// } +/// ``` +/// More complex examples can be found in the sole invocation of this macro. +macro_rules! options { + ($( + $(#[doc = $docs:literal])+ + $field:ident: $ty:ty { + $(default: $default:expr,)? + methods: {$($methods_tokens:tt)*}$(,)? + as_args: $as_args:expr$(,)? + }$(,)? + )*) => { + #[derive(Debug, Clone)] + pub(crate) struct BindgenOptions { + $($(#[doc = $docs])* pub(crate) $field: $ty,)* + } + + impl Default for BindgenOptions { + fn default() -> Self { + Self { + $($field: default!($($default)*),)* + } + } + } + + impl Builder { + /// Generates the command line flags used to create this [`Builder`]. 
+ pub fn command_line_flags(&self) -> Vec { + let mut args = vec![]; + + let headers = match self.options.input_headers.split_last() { + Some((header, headers)) => { + // The last input header is passed as an argument in the first position. + args.push(header.clone()); + headers + }, + None => &[] + }; + + $({ + let func: fn(&$ty, &mut Vec) = as_args!($as_args); + func(&self.options.$field, &mut args); + })* + + // Add the `--experimental` flag if `bindgen` is built with the `experimental` + // feature. + if cfg!(feature = "experimental") { + args.push("--experimental".to_owned()); + } + + // Add all the clang arguments. + args.push("--".to_owned()); + + if !self.options.clang_args.is_empty() { + args.extend_from_slice(&self.options.clang_args); + } + + // We need to pass all but the last header via the `-include` clang argument. + for header in headers { + args.push("-include".to_owned()); + args.push(header.clone()); + } + + args + } + + $($($methods_tokens)*)* + } + }; +} + +options! { + /// Types that have been blocklisted and should not appear anywhere in the generated code. + blocklisted_types: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given type. + pub fn blocklist_type>(mut self, arg: T) -> Builder { + self.options.blocklisted_types.insert(arg); + self + } + } + }, + as_args: "--blocklist-type", + }, + /// Functions that have been blocklisted and should not appear in the generated code. + blocklisted_functions: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given function. + pub fn blocklist_function>(mut self, arg: T) -> Builder { + self.options.blocklisted_functions.insert(arg); + self + } + } + }, + as_args: "--blocklist-function", + }, + /// Items that have been blocklisted and should not appear in the generated code. + blocklisted_items: RegexSet { + methods: { + regex_option! 
{ + /// Do not generate any bindings for the given item, regardless of whether it is a + /// type, function, module, etc. + pub fn blocklist_item>(mut self, arg: T) -> Builder { + self.options.blocklisted_items.insert(arg); + self + } + } + }, + as_args: "--blocklist-item", + }, + /// Files whose contents should be blocklisted and should not appear in the generated code. + blocklisted_files: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the contents of the given file, regardless of + /// whether the contents of the file are types, functions, modules, etc. + pub fn blocklist_file>(mut self, arg: T) -> Builder { + self.options.blocklisted_files.insert(arg); + self + } + } + }, + as_args: "--blocklist-file", + }, + /// Types that should be treated as opaque structures in the generated code. + opaque_types: RegexSet { + methods: { + regex_option! { + /// Treat the given type as opaque in the generated bindings. + /// + /// Opaque in this context means that none of the generated bindings will contain + /// information about the inner representation of the type and the type itself will + /// be represented as a chunk of bytes with the alignment and size of the type. + pub fn opaque_type>(mut self, arg: T) -> Builder { + self.options.opaque_types.insert(arg); + self + } + } + }, + as_args: "--opaque-type", + }, + /// The explicit `rustfmt` path. + rustfmt_path: Option { + methods: { + /// Set an explicit path to the `rustfmt` binary. + /// + /// This option only comes into effect if `rustfmt` is set to be the formatter used by + /// `bindgen`. Check the documentation of the [`Builder::formatter`] method for more + /// information. + pub fn with_rustfmt>(mut self, path: P) -> Self { + self.options.rustfmt_path = Some(path.into()); + self + } + }, + // This option cannot be set from the CLI. + as_args: ignore, + }, + /// The path to which we should write a Makefile-syntax depfile (if any). 
+ depfile: Option { + methods: { + /// Add a depfile output which will be written alongside the generated bindings. + pub fn depfile, D: Into>( + mut self, + output_module: H, + depfile: D, + ) -> Builder { + self.options.depfile = Some(DepfileSpec { + output_module: output_module.into(), + depfile_path: depfile.into(), + }); + self + } + }, + as_args: |depfile, args| { + if let Some(depfile) = depfile { + args.push("--depfile".into()); + args.push(depfile.depfile_path.display().to_string()); + } + }, + }, + /// Types that have been allowlisted and should appear in the generated code. + allowlisted_types: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given type. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_type>(mut self, arg: T) -> Builder { + self.options.allowlisted_types.insert(arg); + self + } + } + }, + as_args: "--allowlist-type", + }, + /// Functions that have been allowlisted and should appear in the generated code. + allowlisted_functions: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given function. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_function>(mut self, arg: T) -> Builder { + self.options.allowlisted_functions.insert(arg); + self + } + } + }, + as_args: "--allowlist-function", + }, + /// Variables that have been allowlisted and should appear in the generated code. + allowlisted_vars: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given variable. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. 
+ pub fn allowlist_var>(mut self, arg: T) -> Builder { + self.options.allowlisted_vars.insert(arg); + self + } + } + }, + as_args: "--allowlist-var", + }, + /// Files whose contents have been allowlisted and should appear in the generated code. + allowlisted_files: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the content of the given file. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_file>(mut self, arg: T) -> Builder { + self.options.allowlisted_files.insert(arg); + self + } + } + }, + as_args: "--allowlist-file", + }, + /// The default style of for generated `enum`s. + default_enum_style: EnumVariation { + methods: { + /// Set the default style for generated `enum`s. + /// + /// If this method is not called, the [`EnumVariation::Consts`] style will be used by + /// default. + /// + /// To set the style for individual `enum`s, use [`Builder::bitfield_enum`], + /// [`Builder::newtype_enum`], [`Builder::newtype_global_enum`], + /// [`Builder::rustified_enum`], [`Builder::rustified_non_exhaustive_enum`], + /// [`Builder::constified_enum_module`] or [`Builder::constified_enum`]. + pub fn default_enum_style( + mut self, + arg: EnumVariation, + ) -> Builder { + self.options.default_enum_style = arg; + self + } + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-enum-style".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// `enum`s marked as bitfield-like. This is, newtypes with bitwise operations. + bitfield_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as being bitfield-like. + /// + /// This is similar to the [`Builder::newtype_enum`] style, but with the bitwise + /// operators implemented. 
+ pub fn bitfield_enum>(mut self, arg: T) -> Builder { + self.options.bitfield_enums.insert(arg); + self + } + } + }, + as_args: "--bitfield-enum", + }, + /// `enum`s marked as newtypes. + newtype_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a newtype. + /// + /// This means that an integer newtype will be declared to represent the `enum` + /// type and its variants will be represented as constants inside of this type's + /// `impl` block. + pub fn newtype_enum>(mut self, arg: T) -> Builder { + self.options.newtype_enums.insert(arg); + self + } + } + }, + as_args: "--newtype-enum", + }, + /// `enum`s marked as global newtypes . + newtype_global_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a global newtype. + /// + /// This is similar to the [`Builder::newtype_enum`] style, but the constants for + /// each variant are free constants instead of being declared inside an `impl` + /// block for the newtype. + pub fn newtype_global_enum>(mut self, arg: T) -> Builder { + self.options.newtype_global_enums.insert(arg); + self + } + } + }, + as_args: "--newtype-global-enum", + }, + /// `enum`s marked as Rust `enum`s. + rustified_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a Rust `enum`. + /// + /// This means that each variant of the `enum` will be represented as a Rust `enum` + /// variant. + /// + /// **Use this with caution**, creating an instance of a Rust `enum` with an + /// invalid value will cause undefined behaviour. To avoid this, use the + /// [`Builder::newtype_enum`] style instead. + pub fn rustified_enum>(mut self, arg: T) -> Builder { + self.options.rustified_enums.insert(arg); + self + } + } + }, + as_args: "--rustified-enum", + }, + /// `enum`s marked as non-exhaustive Rust `enum`s. + rustified_non_exhaustive_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a non-exhaustive Rust `enum`. 
+ /// + /// This is similar to the [`Builder::rustified_enum`] style, but the `enum` is + /// tagged with the `#[non_exhaustive]` attribute. + pub fn rustified_non_exhaustive_enum>(mut self, arg: T) -> Builder { + self.options.rustified_non_exhaustive_enums.insert(arg); + self + } + } + }, + as_args: "--rustified-non-exhaustive-enums", + }, + /// `enum`s marked as modules of constants. + constified_enum_modules: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a module with a set of integer constants. + pub fn constified_enum_module>(mut self, arg: T) -> Builder { + self.options.constified_enum_modules.insert(arg); + self + } + } + }, + as_args: "--constified-enum-module", + }, + /// `enum`s marked as a set of constants. + constified_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a set o integer constants. + /// + /// This is similar to the [`Builder::constified_enum_module`] style, but the + /// constants are generated in the current module instead of in a new module. + pub fn constified_enum>(mut self, arg: T) -> Builder { + self.options.constified_enums.insert(arg); + self + } + } + }, + as_args: "--constified-enum", + }, + /// The default type signedness for C macro constants. + default_macro_constant_type: MacroTypeVariation { + methods: { + /// Set the default type signedness to be used for macro constants. + /// + /// If this method is not called, [`MacroTypeVariation::Unsigned`] is used by default. + /// + /// To set the type for individual macro constants, use the + /// [`ParseCallbacks::int_macro`] method. + pub fn default_macro_constant_type(mut self, arg: MacroTypeVariation) -> Builder { + self.options.default_macro_constant_type = arg; + self + } + + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-macro-constant-type".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// The default style of code generation for `typedef`s. 
+ default_alias_style: AliasVariation { + methods: { + /// Set the default style of code generation for `typedef`s. + /// + /// If this method is not called, the [`AliasVariation::TypeAlias`] style is used by + /// default. + /// + /// To set the style for individual `typedefs`s, use [`Builder::type_alias`], + /// [`Builder::new_type_alias`] or [`Builder::new_type_alias_deref`]. + pub fn default_alias_style( + mut self, + arg: AliasVariation, + ) -> Builder { + self.options.default_alias_style = arg; + self + } + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-alias-style".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// `typedef` patterns that will use regular type aliasing. + type_alias: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` as a regular Rust `type` alias. + /// + /// This is the default behavior, meaning that this method only comes into effect + /// if a style different from [`AliasVariation::TypeAlias`] was passed to the + /// [`Builder::default_alias_style`] method. + pub fn type_alias>(mut self, arg: T) -> Builder { + self.options.type_alias.insert(arg); + self + } + } + }, + as_args: "--type-alias", + }, + /// `typedef` patterns that will be aliased by creating a newtype. + new_type_alias: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` as a Rust newtype by having the aliased + /// type be wrapped in a `struct` with `#[repr(transparent)]`. + /// + /// This method can be used to enforce stricter type checking. + pub fn new_type_alias>(mut self, arg: T) -> Builder { + self.options.new_type_alias.insert(arg); + self + } + } + }, + as_args: "--new-type-alias", + }, + /// `typedef` patterns that will be wrapped in a newtype implementing `Deref` and `DerefMut`. + new_type_alias_deref: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` to be generated as a newtype that can be dereferenced. 
+ /// + /// This is similar to the [`Builder::new_type_alias`] style, but the newtype + /// implements `Deref` and `DerefMut` with the aliased type as a target. + pub fn new_type_alias_deref>(mut self, arg: T) -> Builder { + self.options.new_type_alias_deref.insert(arg); + self + } + } + }, + as_args: "--new-type-alias-deref", + }, + /// The default style of code to generate for `union`s containing non-`Copy` members. + default_non_copy_union_style: NonCopyUnionStyle { + methods: { + /// Set the default style of code to generate for `union`s with non-`Copy` members. + /// + /// If this method is not called, the [`NonCopyUnionStyle::BindgenWrapper`] style is + /// used by default. + /// + /// To set the style for individual `union`s, use [`Builder::bindgen_wrapper_union`] or + /// [`Builder::manually_drop_union`]. + pub fn default_non_copy_union_style(mut self, arg: NonCopyUnionStyle) -> Self { + self.options.default_non_copy_union_style = arg; + self + } + }, + as_args: |style, args| { + if *style != Default::default() { + args.push("--default-non-copy-union-style".to_owned()); + args.push(style.to_string()); + } + }, + }, + /// The patterns marking non-`Copy` `union`s as using the `bindgen` generated wrapper. + bindgen_wrapper_union: RegexSet { + methods: { + regex_option! { + /// Mark the given `union` to use a `bindgen`-generated wrapper for its members if at + /// least one them is not `Copy`. + /// + /// This is the default behavior, meaning that this method only comes into effect + /// if a style different from [`NonCopyUnionStyle::BindgenWrapper`] was passed to + /// the [`Builder::default_non_copy_union_style`] method. + pub fn bindgen_wrapper_union>(mut self, arg: T) -> Self { + self.options.bindgen_wrapper_union.insert(arg); + self + } + } + }, + as_args: "--bindgen-wrapper-union", + }, + /// The patterns marking non-`Copy` `union`s as using the `ManuallyDrop` wrapper. + manually_drop_union: RegexSet { + methods: { + regex_option! 
{ + /// Mark the given `union` to use [`::core::mem::ManuallyDrop`] for its members if + /// at least one of them is not `Copy`. + /// + /// The `ManuallyDrop` type was stabilized in Rust 1.20.0, do not use this option + /// if your target version is lower than this. + pub fn manually_drop_union>(mut self, arg: T) -> Self { + self.options.manually_drop_union.insert(arg); + self + } + } + + }, + as_args: "--manually-drop-union", + }, + + + /// Whether we should generate built-in definitions. + builtins: bool { + methods: { + /// Generate Rust bindings for built-in definitions (for example `__builtin_va_list`). + /// + /// Bindings for built-in definitions are not emitted by default. + pub fn emit_builtins(mut self) -> Builder { + self.options.builtins = true; + self + } + }, + as_args: "--builtins", + }, + /// Whether we should dump the Clang AST for debugging purposes. + emit_ast: bool { + methods: { + /// Emit the Clang AST to `stdout` for debugging purposes. + /// + /// The Clang AST is not emitted by default. + pub fn emit_clang_ast(mut self) -> Builder { + self.options.emit_ast = true; + self + } + }, + as_args: "--emit-clang-ast", + }, + /// Whether we should dump our IR for debugging purposes. + emit_ir: bool { + methods: { + /// Emit the `bindgen` internal representation to `stdout` for debugging purposes. + /// + /// This internal representation is not emitted by default. + pub fn emit_ir(mut self) -> Builder { + self.options.emit_ir = true; + self + } + }, + as_args: "--emit-ir", + }, + /// Output path for the `graphviz` DOT file. + emit_ir_graphviz: Option { + methods: { + /// Set the path for the file where the`bindgen` internal representation will be + /// emitted as a graph using the `graphviz` DOT language. + /// + /// This graph representation is not emitted by default. 
+ pub fn emit_ir_graphviz>(mut self, path: T) -> Builder { + let path = path.into(); + self.options.emit_ir_graphviz = Some(path); + self + } + }, + as_args: "--emit-ir-graphviz", + }, + + /// Whether we should emulate C++ namespaces with Rust modules. + enable_cxx_namespaces: bool { + methods: { + /// Emulate C++ namespaces using Rust modules in the generated bindings. + /// + /// C++ namespaces are not emulated by default. + pub fn enable_cxx_namespaces(mut self) -> Builder { + self.options.enable_cxx_namespaces = true; + self + } + }, + as_args: "--enable-cxx-namespaces", + }, + /// Whether we should try to find unexposed attributes in functions. + enable_function_attribute_detection: bool { + methods: { + /// Enable detecting function attributes on C functions. + /// + /// This enables the following features: + /// - Add `#[must_use]` attributes to Rust items whose C counterparts are marked as so. + /// This feature also requires that the Rust target version supports the attribute. + /// - Set `!` as the return type for Rust functions whose C counterparts are marked as + /// diverging. + /// + /// This option can be quite slow in some cases (check [#1465]), so it is disabled by + /// default. + /// + /// [#1465]: https://github.com/rust-lang/rust-bindgen/issues/1465 + pub fn enable_function_attribute_detection(mut self) -> Self { + self.options.enable_function_attribute_detection = true; + self + } + + }, + as_args: "--enable-function-attribute-detection", + }, + /// Whether we should avoid mangling names with namespaces. + disable_name_namespacing: bool { + methods: { + /// Disable name auto-namespacing. + /// + /// By default, `bindgen` mangles names like `foo::bar::Baz` to look like `foo_bar_Baz` + /// instead of just `Baz`. This method disables that behavior. + /// + /// Note that this does not change the names used for allowlisting and blocklisting, + /// which should still be mangled with the namespaces. 
Additionally, this option may + /// cause `bindgen` to generate duplicate names. + pub fn disable_name_namespacing(mut self) -> Builder { + self.options.disable_name_namespacing = true; + self + } + }, + as_args: "--disable-name-namespacing", + }, + /// Whether we should avoid generating nested `struct` names. + disable_nested_struct_naming: bool { + methods: { + /// Disable nested `struct` naming. + /// + /// The following `struct`s have different names for C and C++. In C, they are visible + /// as `foo` and `bar`. In C++, they are visible as `foo` and `foo::bar`. + /// + /// ```c + /// struct foo { + /// struct bar { + /// } b; + /// }; + /// ``` + /// + /// `bindgen` tries to avoid duplicate names by default, so it follows the C++ naming + /// convention and it generates `foo` and `foo_bar` instead of just `foo` and `bar`. + /// + /// This method disables this behavior and it is indented to be used only for headers + /// that were written in C. + pub fn disable_nested_struct_naming(mut self) -> Builder { + self.options.disable_nested_struct_naming = true; + self + } + }, + as_args: "--disable-nested-struct-naming", + }, + /// Whether we should avoid embedding version identifiers into source code. + disable_header_comment: bool { + methods: { + /// Do not insert the `bindgen` version identifier into the generated bindings. + /// + /// This identifier is inserted by default. + pub fn disable_header_comment(mut self) -> Self { + self.options.disable_header_comment = true; + self + } + + }, + as_args: "--disable-header-comment", + }, + /// Whether we should generate layout tests for generated `struct`s. + layout_tests: bool { + default: true, + methods: { + /// Set whether layout tests should be generated. + /// + /// Layout tests are generated by default. 
+ pub fn layout_tests(mut self, doit: bool) -> Self { + self.options.layout_tests = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-layout-tests"), + }, + /// Whether we should implement `Debug` for types that cannot derive it. + impl_debug: bool { + methods: { + /// Set whether `Debug` should be implemented for types that cannot derive it. + /// + /// This option is disabled by default. + pub fn impl_debug(mut self, doit: bool) -> Self { + self.options.impl_debug = doit; + self + } + + }, + as_args: "--impl-debug", + }, + /// Whether we should implement `PartialEq` types that cannot derive it. + impl_partialeq: bool { + methods: { + /// Set whether `PartialEq` should be implemented for types that cannot derive it. + /// + /// This option is disabled by default. + pub fn impl_partialeq(mut self, doit: bool) -> Self { + self.options.impl_partialeq = doit; + self + } + }, + as_args: "--impl-partialeq", + }, + /// Whether we should derive `Copy` when possible. + derive_copy: bool { + default: true, + methods: { + /// Set whether the `Copy` trait should be derived when possible. + /// + /// `Copy` is derived by default. + pub fn derive_copy(mut self, doit: bool) -> Self { + self.options.derive_copy = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-derive-copy"), + }, + + /// Whether we should derive `Debug` when possible. + derive_debug: bool { + default: true, + methods: { + /// Set whether the `Debug` trait should be derived when possible. + /// + /// The [`Builder::impl_debug`] method can be used to implement `Debug` for types that + /// cannot derive it. + /// + /// `Debug` is derived by default. + pub fn derive_debug(mut self, doit: bool) -> Self { + self.options.derive_debug = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-derive-debug"), + }, + + /// Whether we should derive `Default` when possible. 
+ derive_default: bool { + methods: { + /// Set whether the `Default` trait should be derived when possible. + /// + /// `Default` is not derived by default. + pub fn derive_default(mut self, doit: bool) -> Self { + self.options.derive_default = doit; + self + } + }, + as_args: |&value, args| { + let arg = if value { + "--with-derive-default" + } else { + "--no-derive-default" + }; + + args.push(arg.to_owned()); + }, + }, + /// Whether we should derive `Hash` when possible. + derive_hash: bool { + methods: { + /// Set whether the `Hash` trait should be derived when possible. + /// + /// `Hash` is not derived by default. + pub fn derive_hash(mut self, doit: bool) -> Self { + self.options.derive_hash = doit; + self + } + }, + as_args: "--with-derive-hash", + }, + /// Whether we should derive `PartialOrd` when possible. + derive_partialord: bool { + methods: { + /// Set whether the `PartialOrd` trait should be derived when possible. + /// + /// Take into account that `Ord` cannot be derived for a type that does not implement + /// `PartialOrd`. For this reason, setting this method to `false` also sets + /// automatically [`Builder::derive_ord`] to `false`. + /// + /// `PartialOrd` is not derived by default. + pub fn derive_partialord(mut self, doit: bool) -> Self { + self.options.derive_partialord = doit; + if !doit { + self.options.derive_ord = false; + } + self + } + }, + as_args: "--with-derive-partialord", + }, + /// Whether we should derive `Ord` when possible. + derive_ord: bool { + methods: { + /// Set whether the `Ord` trait should be derived when possible. + /// + /// Take into account that `Ord` cannot be derived for a type that does not implement + /// `PartialOrd`. For this reason, the value set with this method will also be set + /// automatically for [`Builder::derive_partialord`]. + /// + /// `Ord` is not derived by default. 
+ pub fn derive_ord(mut self, doit: bool) -> Self { + self.options.derive_ord = doit; + self.options.derive_partialord = doit; + self + } + }, + as_args: "--with-derive-ord", + }, + /// Whether we should derive `PartialEq` when possible. + derive_partialeq: bool { + methods: { + /// Set whether the `PartialEq` trait should be derived when possible. + /// + /// Take into account that `Eq` cannot be derived for a type that does not implement + /// `PartialEq`. For this reason, setting this method to `false` also sets + /// automatically [`Builder::derive_eq`] to `false`. + /// + /// The [`Builder::impl_partialeq`] method can be used to implement `PartialEq` for + /// types that cannot derive it. + /// + /// `PartialEq` is not derived by default. + pub fn derive_partialeq(mut self, doit: bool) -> Self { + self.options.derive_partialeq = doit; + if !doit { + self.options.derive_eq = false; + } + self + } + }, + as_args: "--with-derive-partialeq", + }, + /// Whether we should derive `Eq` when possible. + derive_eq: bool { + methods: { + /// Set whether the `Eq` trait should be derived when possible. + /// + /// Take into account that `Eq` cannot be derived for a type that does not implement + /// `PartialEq`. For this reason, the value set with this method will also be set + /// automatically for [`Builder::derive_partialeq`]. + /// + /// `Eq` is not derived by default. + pub fn derive_eq(mut self, doit: bool) -> Self { + self.options.derive_eq = doit; + if doit { + self.options.derive_partialeq = doit; + } + self + } + }, + as_args: "--with-derive-eq", + }, + /// Whether we should use `core` instead of `std`. + /// + /// If this option is enabled and the Rust target version is greater than 1.64, the prefix for + /// C platform-specific types will be `::core::ffi` instead of `::core::os::raw`. + use_core: bool { + methods: { + /// Use `core` instead of `std` in the generated bindings. + /// + /// `std` is used by default. 
+ pub fn use_core(mut self) -> Builder { + self.options.use_core = true; + self + } + + }, + as_args: "--use-core", + }, + /// An optional prefix for the C platform-specific types. + ctypes_prefix: Option { + methods: { + /// Use the given prefix for the C platform-specific types instead of `::std::os::raw`. + /// + /// Alternatively, the [`Builder::use_core`] method can be used to set the prefix to + /// `::core::ffi` or `::core::os::raw`. + pub fn ctypes_prefix>(mut self, prefix: T) -> Builder { + self.options.ctypes_prefix = Some(prefix.into()); + self + } + }, + as_args: "--ctypes-prefix", + }, + /// The prefix for anonymous fields. + anon_fields_prefix: String { + default: DEFAULT_ANON_FIELDS_PREFIX.into(), + methods: { + /// Use the given prefix for the anonymous fields. + /// + /// An anonymous field, is a field of a C/C++ type that does not have a name. For + /// example, in the following C code: + /// ```c + /// struct integer { + /// struct { + /// int inner; + /// }; + /// } + /// ``` + /// + /// The only field of the `integer` `struct` is an anonymous field and its Rust + /// representation will be named using this prefix followed by an integer identifier. + /// + /// The default prefix is `__bindgen_anon_`. + pub fn anon_fields_prefix>(mut self, prefix: T) -> Builder { + self.options.anon_fields_prefix = prefix.into(); + self + } + }, + as_args: |prefix, args| { + if prefix != DEFAULT_ANON_FIELDS_PREFIX { + args.push("--anon-fields-prefix".to_owned()); + args.push(prefix.clone()); + } + }, + }, + /// Whether to measure the time for each one of the `bindgen` phases. + time_phases: bool { + methods: { + /// Set whether to measure the elapsed time for each one of the `bindgen` phases. This + /// information is printed to `stderr`. + /// + /// The elapsed time is not measured by default. 
+ pub fn time_phases(mut self, doit: bool) -> Self { + self.options.time_phases = doit; + self + } + }, + as_args: "--time-phases", + }, + /// Whether to convert C float types to `f32` and `f64`. + convert_floats: bool { + default: true, + methods: { + /// Avoid converting C float types to `f32` and `f64`. + pub fn no_convert_floats(mut self) -> Self { + self.options.convert_floats = false; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-convert-floats"), + }, + /// The set of raw lines to be prepended to the top-level module of the generated Rust code. + raw_lines: Vec { + methods: { + /// Add a line of Rust code at the beginning of the generated bindings. The string is + /// passed through without any modification. + pub fn raw_line>(mut self, arg: T) -> Self { + self.options.raw_lines.push(arg.into()); + self + } + }, + as_args: |raw_lines, args| { + for line in raw_lines { + args.push("--raw-line".to_owned()); + args.push(line.clone()); + } + }, + }, + /// The set of raw lines to prepend to different modules. + module_lines: HashMap> { + methods: { + /// Add a given line to the beginning of a given module. + /// + /// This option only comes into effect if the [`Builder::enable_cxx_namespaces`] method + /// is also being called. + pub fn module_raw_line(mut self, module: T, line: U) -> Self + where + T: Into, + U: Into, + { + self.options + .module_lines + .entry(module.into()) + .or_insert_with(Vec::new) + .push(line.into()); + self + } + }, + as_args: |module_lines, args| { + for (module, lines) in module_lines { + for line in lines.iter() { + args.push("--module-raw-line".to_owned()); + args.push(module.clone()); + args.push(line.clone()); + } + } + }, + }, + /// The input header files. + input_headers: Vec { + methods: { + /// Add an input C/C++ header to generate bindings for. 
+ /// + /// This can be used to generate bindings for a single header: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .header("input.h") + /// .generate() + /// .unwrap(); + /// ``` + /// + /// Or for multiple headers: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .header("first.h") + /// .header("second.h") + /// .header("third.h") + /// .generate() + /// .unwrap(); + /// ``` + pub fn header>(mut self, header: T) -> Builder { + self.options.input_headers.push(header.into()); + self + } + }, + // This field is handled specially inside the macro. + as_args: ignore, + }, + /// The set of arguments to be passed straight through to Clang. + clang_args: Vec { + methods: { + /// Add an argument to be passed straight through to Clang. + pub fn clang_arg>(self, arg: T) -> Builder { + self.clang_args([arg.into()]) + } + + /// Add several arguments to be passed straight through to Clang. + pub fn clang_args(mut self, args: I) -> Builder + where + I::Item: AsRef, + { + for arg in args { + self.options.clang_args.push(arg.as_ref().to_owned()); + } + self + } + }, + // This field is handled specially inside the macro. + as_args: ignore, + }, + /// Tuples of unsaved file contents of the form (name, contents). + input_header_contents: Vec<(String, String)> { + methods: { + /// Add `contents` as an input C/C++ header named `name`. + /// + /// This can be used to inject additional C/C++ code as an input without having to + /// create additional header files. 
+ pub fn header_contents(mut self, name: &str, contents: &str) -> Builder { + // Apparently clang relies on having virtual FS correspondent to + // the real one, so we need absolute paths here + let absolute_path = env::current_dir() + .expect("Cannot retrieve current directory") + .join(name) + .to_str() + .expect("Cannot convert current directory name to string") + .to_owned(); + self.options + .input_header_contents + .push((absolute_path, contents.into())); + self + } + }, + // Header contents cannot be added from the CLI. + as_args: ignore, + }, + /// A user-provided visitor to allow customizing different kinds of situations. + parse_callbacks: Vec> { + methods: { + /// Add a new [`ParseCallbacks`] instance to configure types in different situations. + pub fn parse_callbacks(mut self, cb: Box) -> Self { + self.options.parse_callbacks.push(Rc::from(cb)); + self + } + }, + as_args: |_callbacks, _args| { + #[cfg(feature = "__cli")] + for cb in _callbacks { + _args.extend(cb.cli_args()); + } + }, + }, + /// Which kind of items should we generate. We generate all of them by default. + codegen_config: CodegenConfig { + default: CodegenConfig::all(), + methods: { + /// Do not generate any functions. + /// + /// Functions are generated by default. + pub fn ignore_functions(mut self) -> Builder { + self.options.codegen_config.remove(CodegenConfig::FUNCTIONS); + self + } + + /// Do not generate any methods. + /// + /// Methods are generated by default. + pub fn ignore_methods(mut self) -> Builder { + self.options.codegen_config.remove(CodegenConfig::METHODS); + self + } + + /// Choose what to generate using a [`CodegenConfig`]. + /// + /// This option overlaps with [`Builder::ignore_functions`] and + /// [`Builder::ignore_methods`]. + /// + /// All the items in `CodegenConfig` are generated by default. 
+ pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self { + self.options.codegen_config = config; + self + } + }, + as_args: |codegen_config, args| { + if !codegen_config.functions() { + args.push("--ignore-functions".to_owned()); + } + + args.push("--generate".to_owned()); + + //Temporary placeholder for the 4 options below. + let mut options: Vec = Vec::new(); + if codegen_config.functions() { + options.push("functions".to_owned()); + } + + if codegen_config.types() { + options.push("types".to_owned()); + } + + if codegen_config.vars() { + options.push("vars".to_owned()); + } + + if codegen_config.methods() { + options.push("methods".to_owned()); + } + + if codegen_config.constructors() { + options.push("constructors".to_owned()); + } + + if codegen_config.destructors() { + options.push("destructors".to_owned()); + } + + args.push(options.join(",")); + + if !codegen_config.methods() { + args.push("--ignore-methods".to_owned()); + } + }, + }, + /// Whether to treat inline namespaces conservatively. + conservative_inline_namespaces: bool { + methods: { + /// Treat inline namespaces conservatively. + /// + /// This is tricky, because in C++ is technically legal to override an item + /// defined in an inline namespace: + /// + /// ```cpp + /// inline namespace foo { + /// using Bar = int; + /// } + /// using Bar = long; + /// ``` + /// + /// Even though referencing `Bar` is a compiler error. + /// + /// We want to support this (arguably esoteric) use case, but we do not want to make + /// the rest of `bindgen` users pay an usability penalty for that. + /// + /// To support this, we need to keep all the inline namespaces around, but then using + /// `bindgen` becomes a bit more difficult, because you cannot reference paths like + /// `std::string` (you'd need to use the proper inline namespace). 
+ /// + /// We could complicate a lot of the logic to detect name collisions and, in the + /// absence of collisions, generate a `pub use inline_ns::*` or something like that. + /// + /// That is probably something we can do to improve the usability of this option if we + /// realize it is needed way more often. Our guess is that this extra logic is not + /// going to be very useful. + /// + /// This option is disabled by default. + pub fn conservative_inline_namespaces(mut self) -> Builder { + self.options.conservative_inline_namespaces = true; + self + } + }, + as_args: "--conservative-inline-namespaces", + }, + /// Whether to keep documentation comments in the generated output. + generate_comments: bool { + default: true, + methods: { + /// Set whether the generated bindings should contain documentation comments. + /// + /// Documentation comments are included by default. + /// + /// Note that clang excludes comments from system headers by default, pass + /// `"-fretain-comments-from-system-headers"` to the [`Builder::clang_arg`] method to + /// include them. + /// + /// It is also possible to process all comments and not just documentation using the + /// `"-fparse-all-comments"` flag. Check [these slides on clang comment parsing]( + /// https://llvm.org/devmtg/2012-11/Gribenko_CommentParsing.pdf) for more information + /// and examples. + pub fn generate_comments(mut self, doit: bool) -> Self { + self.options.generate_comments = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-doc-comments"), + }, + /// Whether to generate inline functions. + generate_inline_functions: bool { + methods: { + /// Set whether to generate inline functions. + /// + /// This option is disabled by default. + /// + /// Note that they will usually not work. However you can use `-fkeep-inline-functions` + /// or `-fno-inline-functions` if you are responsible of compiling the library to make + /// them callable. 
+ #[cfg_attr( + features = "experimental", + doc = "\nCheck the [`Builder::wrap_static_fns`] method for an alternative." + )] + pub fn generate_inline_functions(mut self, doit: bool) -> Self { + self.options.generate_inline_functions = doit; + self + } + }, + as_args: "--generate-inline-functions", + }, + /// Whether to allowlist types recursively. + allowlist_recursively: bool { + default: true, + methods: { + /// Set whether to recursively allowlist items. + /// + /// Items are allowlisted recursively by default. + /// + /// Given that we have explicitly allowlisted the `initiate_dance_party` function in + /// this C header: + /// + /// ```c + /// typedef struct MoonBoots { + /// int bouncy_level; + /// } MoonBoots; + /// + /// void initiate_dance_party(MoonBoots* boots); + /// ``` + /// + /// We would normally generate bindings to both the `initiate_dance_party` function and + /// the `MoonBoots` type that it transitively references. If `false` is passed to this + /// method, `bindgen` will not emit bindings for anything except the explicitly + /// allowlisted items, meaning that the definition for `MoonBoots` would not be + /// generated. However, the `initiate_dance_party` function would still reference + /// `MoonBoots`! + /// + /// **Disabling this feature will almost certainly cause `bindgen` to emit bindings + /// that will not compile!** If you disable this feature, then it is *your* + /// responsibility to provide definitions for every type that is referenced from an + /// explicitly allowlisted item. One way to provide the missing definitions is by using + /// the [`Builder::raw_line`] method, another would be to define them in Rust and then + /// `include!(...)` the bindings immediately afterwards. 
+ pub fn allowlist_recursively(mut self, doit: bool) -> Self { + self.options.allowlist_recursively = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-recursive-allowlist"), + }, + /// Whether to emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of + /// the files generated from objective-c files. + objc_extern_crate: bool { + methods: { + /// Emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of + /// the files generated from objective-c files. + /// + /// `use objc;` is emitted by default. + pub fn objc_extern_crate(mut self, doit: bool) -> Self { + self.options.objc_extern_crate = doit; + self + } + }, + as_args: "--objc-extern-crate", + }, + /// Whether to generate proper block signatures instead of `void` pointers. + generate_block: bool { + methods: { + /// Generate proper block signatures instead of `void` pointers. + /// + /// `void` pointers are used by default. + pub fn generate_block(mut self, doit: bool) -> Self { + self.options.generate_block = doit; + self + } + }, + as_args: "--generate-block", + }, + /// Whether to emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue + /// of the files generated from apple block files. + block_extern_crate: bool { + methods: { + /// Emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue of + /// the files generated from apple block files. + /// + /// `use block;` is emitted by default. + pub fn block_extern_crate(mut self, doit: bool) -> Self { + self.options.block_extern_crate = doit; + self + } + }, + as_args: "--block-extern-crate", + }, + /// Whether to use the clang-provided name mangling. + enable_mangling: bool { + default: true, + methods: { + /// Set whether to use the clang-provided name mangling. This is probably needed for + /// C++ features. + /// + /// The mangling provided by clang is used by default. 
+ /// + /// We allow disabling this option because some old `libclang` versions seem to return + /// incorrect results in some cases for non-mangled functions, check [#528] for more + /// information. + /// + /// [#528]: https://github.com/rust-lang/rust-bindgen/issues/528 + pub fn trust_clang_mangling(mut self, doit: bool) -> Self { + self.options.enable_mangling = doit; + self + } + + }, + as_args: |value, args| (!value).as_args(args, "--distrust-clang-mangling"), + }, + /// Whether to detect include paths using `clang_sys`. + detect_include_paths: bool { + default: true, + methods: { + /// Set whether to detect include paths using `clang_sys`. + /// + /// `clang_sys` is used to detect include paths by default. + pub fn detect_include_paths(mut self, doit: bool) -> Self { + self.options.detect_include_paths = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-include-path-detection"), + }, + /// Whether we should try to fit macro constants into types smaller than `u32` and `i32`. + fit_macro_constants: bool { + methods: { + /// Set whether `bindgen` should try to fit macro constants into types smaller than `u32` + /// and `i32`. + /// + /// This option is disabled by default. + pub fn fit_macro_constants(mut self, doit: bool) -> Self { + self.options.fit_macro_constants = doit; + self + } + }, + as_args: "--fit-macro-constant-types", + }, + /// Whether to prepend the `enum` name to constant or newtype variants. + prepend_enum_name: bool { + default: true, + methods: { + /// Set whether to prepend the `enum` name to constant or newtype variants. + /// + /// The `enum` name is prepended by default. + pub fn prepend_enum_name(mut self, doit: bool) -> Self { + self.options.prepend_enum_name = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-prepend-enum-name"), + }, + /// Version of the Rust compiler to target. + rust_target: RustTarget { + methods: { + /// Specify the Rust target version. 
+ /// + /// The default target is the latest stable Rust version. + pub fn rust_target(mut self, rust_target: RustTarget) -> Self { + self.options.set_rust_target(rust_target); + self + } + }, + as_args: |rust_target, args| { + args.push("--rust-target".to_owned()); + args.push((*rust_target).into()); + }, + }, + /// Features to be enabled. They are derived from `rust_target`. + rust_features: RustFeatures { + default: RustTarget::default().into(), + methods: {}, + // This field cannot be set from the CLI, + as_args: ignore, + }, + /// Enable support for native Rust unions if they are supported. + untagged_union: bool { + default: true, + methods: { + /// Disable support for native Rust unions, if supported. + /// + /// The default value of this option is set based on the value passed to + /// [`Builder::rust_target`]. + pub fn disable_untagged_union(mut self) -> Self { + self.options.untagged_union = false; + self + } + } + as_args: |value, args| (!value).as_args(args, "--disable-untagged-union"), + }, + /// Whether we should record which items in the regex sets did match any C items. + record_matches: bool { + default: true, + methods: { + /// Set whether we should record which items in our regex sets did match any C items. + /// + /// Matches are recorded by default. + pub fn record_matches(mut self, doit: bool) -> Self { + self.options.record_matches = doit; + self + } + + }, + as_args: |value, args| (!value).as_args(args, "--no-record-matches"), + }, + /// Whether `size_t` should be translated to `usize` automatically. + size_t_is_usize: bool { + default: true, + methods: { + /// Set whether `size_t` should be translated to `usize`. + /// + /// `size_t` is translated to `usize` by default. + pub fn size_t_is_usize(mut self, is: bool) -> Self { + self.options.size_t_is_usize = is; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-size_t-is-usize"), + }, + /// The tool that should be used to format the generated bindings. 
+ formatter: Formatter { + methods: { + /// Set whether `rustfmt` should be used to format the generated bindings. + /// + /// `rustfmt` is used by default. + /// + /// This method overlaps in functionality with the more general [`Builder::formatter`]. + /// Thus, the latter should be preferred. + #[deprecated] + pub fn rustfmt_bindings(mut self, doit: bool) -> Self { + self.options.formatter = if doit { + Formatter::Rustfmt + } else { + Formatter::None + }; + self + } + + /// Set which tool should be used to format the generated bindings. + /// + /// The default formatter is [`Formatter::Rustfmt`]. + /// + /// To be able to use `prettyplease` as a formatter, the `"prettyplease"` feature for + /// `bindgen` must be enabled in the Cargo manifest. + pub fn formatter(mut self, formatter: Formatter) -> Self { + self.options.formatter = formatter; + self + } + }, + as_args: |formatter, args| { + if *formatter != Default::default() { + args.push("--formatter".to_owned()); + args.push(formatter.to_string()); + } + }, + }, + /// The absolute path to the `rustfmt` configuration file. + rustfmt_configuration_file: Option { + methods: { + /// Set the absolute path to the `rustfmt` configuration file. + /// + /// The default `rustfmt` options are used if `None` is passed to this method or if + /// this method is not called at all. + /// + /// Calling this method will set the [`Builder::rustfmt_bindings`] option to `true` + /// and the [`Builder::formatter`] option to [`Formatter::Rustfmt`]. + pub fn rustfmt_configuration_file(mut self, path: Option) -> Self { + self = self.formatter(Formatter::Rustfmt); + self.options.rustfmt_configuration_file = path; + self + } + }, + as_args: "--rustfmt-configuration-file", + }, + /// Types that should not derive `PartialEq`. + no_partialeq_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `PartialEq` for a given type. 
+ pub fn no_partialeq>(mut self, arg: T) -> Builder { + self.options.no_partialeq_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-partialeq", + }, + /// Types that should not derive `Copy`. + no_copy_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Copy` and `Clone` for a given type. + pub fn no_copy>(mut self, arg: T) -> Self { + self.options.no_copy_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-copy", + }, + /// Types that should not derive `Debug`. + no_debug_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Debug` for a given type. + pub fn no_debug>(mut self, arg: T) -> Self { + self.options.no_debug_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-debug", + }, + /// Types that should not derive or implement `Default`. + no_default_types: RegexSet { + methods: { + regex_option! { + /// Do not derive or implement `Default` for a given type. + pub fn no_default>(mut self, arg: T) -> Self { + self.options.no_default_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-default", + }, + /// Types that should not derive `Hash`. + no_hash_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Hash` for a given type. + pub fn no_hash>(mut self, arg: T) -> Builder { + self.options.no_hash_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-hash", + }, + /// Types that should be annotated with `#[must_use]`. + must_use_types: RegexSet { + methods: { + regex_option! { + /// Annotate the given type with the `#[must_use]` attribute. 
+ pub fn must_use_type>(mut self, arg: T) -> Builder { + self.options.must_use_types.insert(arg.into()); + self + } + } + }, + as_args: "--must-use-type", + }, + /// Whether C arrays should be regular pointers in rust or array pointers + array_pointers_in_arguments: bool { + methods: { + /// Translate arrays `T arr[size]` into array pointers `*mut [T; size]` instead of + /// translating them as `*mut T` which is the default. + /// + /// The same is done for `*const` pointers. + pub fn array_pointers_in_arguments(mut self, doit: bool) -> Self { + self.options.array_pointers_in_arguments = doit; + self + } + + }, + as_args: "--use-array-pointers-in-arguments", + }, + /// The name of the `wasm_import_module`. + wasm_import_module_name: Option { + methods: { + /// Adds the `#[link(wasm_import_module = import_name)]` attribute to all the `extern` + /// blocks generated by `bindgen`. + /// + /// This attribute is not added by default. + pub fn wasm_import_module_name>( + mut self, + import_name: T, + ) -> Self { + self.options.wasm_import_module_name = Some(import_name.into()); + self + } + }, + as_args: "--wasm-import-module-name", + }, + /// The name of the dynamic library (if we are generating bindings for a shared library). + dynamic_library_name: Option { + methods: { + /// Generate bindings for a shared library with the given name. + /// + /// This option is disabled by default. + pub fn dynamic_library_name>( + mut self, + dynamic_library_name: T, + ) -> Self { + self.options.dynamic_library_name = Some(dynamic_library_name.into()); + self + } + }, + as_args: "--dynamic-loading", + }, + /// Whether to equire successful linkage for all routines in a shared library. + dynamic_link_require_all: bool { + methods: { + /// Set whether to require successful linkage for all routines in a shared library. + /// This allows us to optimize function calls by being able to safely assume function + /// pointers are valid. 
+ /// + /// This option only comes into effect if the [`Builder::dynamic_library_name`] option + /// is set. + /// + /// This option is disabled by default. + pub fn dynamic_link_require_all(mut self, req: bool) -> Self { + self.options.dynamic_link_require_all = req; + self + } + }, + as_args: "--dynamic-link-require-all", + }, + /// Whether to only make generated bindings `pub` if the items would be publicly accessible by + /// C++. + respect_cxx_access_specs: bool { + methods: { + /// Set whether to respect the C++ access specifications. + /// + /// Passing `true` to this method will set the visibility of the generated Rust items + /// as `pub` only if the corresponding C++ items are publicly accessible instead of + /// marking all the items as public, which is the default. + pub fn respect_cxx_access_specs(mut self, doit: bool) -> Self { + self.options.respect_cxx_access_specs = doit; + self + } + + }, + as_args: "--respect-cxx-access-specs", + }, + /// Whether to translate `enum` integer types to native Rust integer types. + translate_enum_integer_types: bool { + methods: { + /// Set whether to always translate `enum` integer types to native Rust integer types. + /// + /// Passing `true` to this method will result in `enum`s having types such as `u32` and + /// `i16` instead of `c_uint` and `c_short` which is the default. The `#[repr]` types + /// of Rust `enum`s are always translated to Rust integer types. + pub fn translate_enum_integer_types(mut self, doit: bool) -> Self { + self.options.translate_enum_integer_types = doit; + self + } + }, + as_args: "--translate-enum-integer-types", + }, + /// Whether to generate types with C style naming. + c_naming: bool { + methods: { + /// Set whether to generate types with C style naming. + /// + /// Passing `true` to this method will add prefixes to the generated type names. For + /// example, instead of a `struct` with name `A` we will generate a `struct` with + /// `struct_A`. 
Currently applies to `struct`s, `union`s, and `enum`s. + pub fn c_naming(mut self, doit: bool) -> Self { + self.options.c_naming = doit; + self + } + }, + as_args: "--c-naming", + }, + /// Wether to always emit explicit padding fields. + force_explicit_padding: bool { + methods: { + /// Set whether to always emit explicit padding fields. + /// + /// This option should be enabled if a `struct` needs to be serialized in its native + /// format (padding bytes and all). This could be required if such `struct` will be + /// written to a file or sent over the network, as anything reading the padding bytes + /// of a struct may cause undefined behavior. + /// + /// Padding fields are not emitted by default. + pub fn explicit_padding(mut self, doit: bool) -> Self { + self.options.force_explicit_padding = doit; + self + } + }, + as_args: "--explicit-padding", + }, + /// Whether to emit vtable functions. + vtable_generation: bool { + methods: { + /// Set whether to enable experimental support to generate virtual table functions. + /// + /// This option should mostly work, though some edge cases are likely to be broken. + /// + /// Virtual table generation is disabled by default. + pub fn vtable_generation(mut self, doit: bool) -> Self { + self.options.vtable_generation = doit; + self + } + }, + as_args: "--vtable-generation", + }, + /// Whether to sort the generated Rust items. + sort_semantically: bool { + methods: { + /// Set whether to sort the generated Rust items in a predefined manner. + /// + /// Items are not ordered by default. + pub fn sort_semantically(mut self, doit: bool) -> Self { + self.options.sort_semantically = doit; + self + } + }, + as_args: "--sort-semantically", + }, + /// Whether to deduplicate `extern` blocks. + merge_extern_blocks: bool { + methods: { + /// Merge all extern blocks under the same module into a single one. + /// + /// Extern blocks are not merged by default. 
+ pub fn merge_extern_blocks(mut self, doit: bool) -> Self { + self.options.merge_extern_blocks = doit; + self + } + }, + as_args: "--merge-extern-blocks", + }, + /// Whether to wrap unsafe operations in unsafe blocks. + wrap_unsafe_ops: bool { + methods: { + /// Wrap all unsafe operations in unsafe blocks. + /// + /// Unsafe operations are not wrapped by default. + pub fn wrap_unsafe_ops(mut self, doit: bool) -> Self { + self.options.wrap_unsafe_ops = doit; + self + } + }, + as_args: "--wrap-unsafe-ops", + }, + /// Patterns for functions whose ABI should be overriden. + abi_overrides: HashMap { + methods: { + regex_option! { + /// Override the ABI of a given function. + pub fn override_abi>(mut self, abi: Abi, arg: T) -> Self { + self.options + .abi_overrides + .entry(abi) + .or_default() + .insert(arg.into()); + self + } + } + }, + as_args: |overrides, args| { + for (abi, set) in overrides { + for item in set.get_items() { + args.push("--override-abi".to_owned()); + args.push(format!("{}={}", item, abi)); + } + } + }, + }, + /// Whether to generate wrappers for `static` functions. + wrap_static_fns: bool { + methods: { + #[cfg(feature = "experimental")] + /// Set whether to generate wrappers for `static`` functions. + /// + /// Passing `true` to this method will generate a C source file with non-`static` + /// functions that call the `static` functions found in the input headers and can be + /// called from Rust once the source file is compiled. + /// + /// The path of this source file can be set using the [`Builder::wrap_static_fns_path`] + /// method. + pub fn wrap_static_fns(mut self, doit: bool) -> Self { + self.options.wrap_static_fns = doit; + self + } + }, + as_args: "--wrap-static-fns", + }, + /// The suffix to be added to the function wrappers for `static` functions. + wrap_static_fns_suffix: Option { + methods: { + #[cfg(feature = "experimental")] + /// Set the suffix added to the wrappers for `static` functions. 
+ /// + /// This option only comes into effect if `true` is passed to the + /// [`Builder::wrap_static_fns`] method. + /// + /// The default suffix is `__extern`. + pub fn wrap_static_fns_suffix>(mut self, suffix: T) -> Self { + self.options.wrap_static_fns_suffix = Some(suffix.as_ref().to_owned()); + self + } + }, + as_args: "--wrap-static-fns-suffix", + }, + /// The path of the file where the wrappers for `static` functions will be emitted. + wrap_static_fns_path: Option { + methods: { + #[cfg(feature = "experimental")] + /// Set the path for the source code file that would be created if any wrapper + /// functions must be generated due to the presence of `static` functions. + /// + /// `bindgen` will automatically add the right extension to the header and source code + /// files. + /// + /// This option only comes into effect if `true` is passed to the + /// [`Builder::wrap_static_fns`] method. + /// + /// The default path is `temp_dir/bindgen/extern`, where `temp_dir` is the path + /// returned by [`std::env::temp_dir`] . + pub fn wrap_static_fns_path>(mut self, path: T) -> Self { + self.options.wrap_static_fns_path = Some(path.as_ref().to_owned()); + self + } + }, + as_args: "--wrap-static-fns-path", + }, + /// Default visibility of fields. + default_visibility: FieldVisibilityKind { + methods: { + /// Set the default visibility of fields, including bitfields and accessor methods for + /// bitfields. + /// + /// This option only comes into effect if the [`Builder::respect_cxx_access_specs`] + /// option is disabled. + pub fn default_visibility( + mut self, + visibility: FieldVisibilityKind, + ) -> Self { + self.options.default_visibility = visibility; + self + } + }, + as_args: |visibility, args| { + if *visibility != Default::default() { + args.push("--default-visibility".to_owned()); + args.push(visibility.to_string()); + } + }, + }, + /// Whether to emit diagnostics or not. 
+ emit_diagnostics: bool { + methods: { + #[cfg(feature = "experimental")] + /// Emit diagnostics. + /// + /// These diagnostics are emitted to `stderr` if you are using `bindgen-cli` or printed + /// using `cargo:warning=` if you are using `bindgen` as a `build-dependency`. + /// + /// Diagnostics are not emitted by default. + /// + /// The layout and contents of these diagnostic messages are not covered by versioning + /// and can change without notice. + pub fn emit_diagnostics(mut self) -> Self { + self.options.emit_diagnostics = true; + self + } + }, + as_args: "--emit-diagnostics", + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/parse.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/parse.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/parse.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,41 @@ +//! Common traits and types related to parsing our IR from Clang cursors. +#![deny(clippy::missing_docs_in_private_items)] + +use crate::clang; +use crate::ir::context::{BindgenContext, ItemId}; + +/// Not so much an error in the traditional sense, but a control flow message +/// when walking over Clang's AST with a cursor. +#[derive(Debug)] +pub(crate) enum ParseError { + /// Recurse down the current AST node's children. + Recurse, + /// Continue on to the next sibling AST node, or back up to the parent's + /// siblings if we've exhausted all of this node's siblings (and so on). + Continue, +} + +/// The result of parsing a Clang AST node. +#[derive(Debug)] +pub(crate) enum ParseResult { + /// We've already resolved this item before, here is the extant `ItemId` for + /// it. + AlreadyResolved(ItemId), + + /// This is a newly parsed item. If the cursor is `Some`, it points to the + /// AST node where the new `T` was declared. + New(T, Option), +} + +/// An intermediate representation "sub-item" (i.e. 
one of the types contained +/// inside an `ItemKind` variant) that can be parsed from a Clang cursor. +pub(crate) trait ClangSubItemParser: Sized { + /// Attempt to parse this type from the given cursor. + /// + /// The fact that is a reference guarantees it's held by the context, and + /// allow returning already existing types. + fn parse( + cursor: clang::Cursor, + context: &mut BindgenContext, + ) -> Result, ParseError>; +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/README.md 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -[![crates.io](https://img.shields.io/crates/v/bindgen.svg)](https://crates.io/crates/bindgen) -[![docs.rs](https://docs.rs/bindgen/badge.svg)](https://docs.rs/bindgen/) - -# `bindgen` - -**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.** - -For example, given the C header `doggo.h`: - -```c -typedef struct Doggo { - int many; - char wow; -} Doggo; - -void eleven_out_of_ten_majestic_af(Doggo* pupper); -``` - -`bindgen` produces Rust FFI code allowing you to call into the `doggo` library's -functions and use its types: - -```rust -/* automatically generated by rust-bindgen 0.99.9 */ - -#[repr(C)] -pub struct Doggo { - pub many: ::std::os::raw::c_int, - pub wow: ::std::os::raw::c_char, -} - -extern "C" { - pub fn eleven_out_of_ten_majestic_af(pupper: *mut Doggo); -} -``` - -## Users Guide - -[📚 Read the `bindgen` users guide here! 📚](https://rust-lang.github.io/rust-bindgen) - -## MSRV - -The minimum supported Rust version is **1.46**. - -No MSRV bump policy has been established yet, so MSRV may increase in any release. 
- -## API Reference - -[API reference documentation is on docs.rs](https://docs.rs/bindgen) - -## Environment Variables - -In addition to the [library API](https://docs.rs/bindgen) and [executable command-line API][bindgen-cmdline], -`bindgen` can be controlled through environment variables. - -End-users should set these environment variables to modify `bindgen`'s behavior without modifying the source code of direct consumers of `bindgen`. - -- `BINDGEN_EXTRA_CLANG_ARGS`: extra arguments to pass to `clang` - - Arguments are whitespace-separated - - Use shell-style quoting to pass through whitespace - - Examples: - - Specify alternate sysroot: `--sysroot=/path/to/sysroot` - - Add include search path with spaces: `-I"/path/with spaces"` -- `BINDGEN_EXTRA_CLANG_ARGS_`: similar to `BINDGEN_EXTRA_CLANG_ARGS`, - but used to set per-target arguments to pass to clang. Useful to set system include - directories in a target-specific way in cross-compilation environments with multiple targets. - Has precedence over `BINDGEN_EXTRA_CLANG_ARGS`. - -Additionally, `bindgen` uses `libclang` to parse C and C++ header files. -To modify how `bindgen` searches for `libclang`, see the [`clang-sys` documentation][clang-sys-env]. -For more details on how `bindgen` uses `libclang`, see the [`bindgen` users guide][bindgen-book-clang]. - -## Releases - -We don't follow a specific release calendar, but if you need a release please -file an issue requesting that (ping `@emilio` for increased effectiveness). 
- -## Contributing - -[See `CONTRIBUTING.md` for hacking on `bindgen`!](./CONTRIBUTING.md) - -[bindgen-cmdline]: https://rust-lang.github.io/rust-bindgen/command-line-usage.html -[clang-sys-env]: https://github.com/KyleMayes/clang-sys#environment-variables -[bindgen-book-clang]: https://rust-lang.github.io/rust-bindgen/requirements.html#clang diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/regex_set.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/regex_set.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/regex_set.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/regex_set.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,204 @@ +//! A type that represents the union of a set of regular expressions. +#![deny(clippy::missing_docs_in_private_items)] + +use regex::RegexSet as RxSet; +use std::cell::Cell; + +/// A dynamic set of regular expressions. +#[derive(Clone, Debug, Default)] +pub struct RegexSet { + items: Vec, + /// Whether any of the items in the set was ever matched. The length of this + /// vector is exactly the length of `items`. + matched: Vec>, + set: Option, + /// Whether we should record matching items in the `matched` vector or not. + record_matches: bool, +} + +impl RegexSet { + /// Create a new RegexSet + pub fn new() -> RegexSet { + RegexSet::default() + } + + /// Is this set empty? + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } + + /// Insert a new regex into this set. + pub fn insert(&mut self, string: S) + where + S: AsRef, + { + self.items.push(string.as_ref().to_owned()); + self.matched.push(Cell::new(false)); + self.set = None; + } + + /// Returns slice of String from its field 'items' + pub fn get_items(&self) -> &[String] { + &self.items[..] + } + + /// Returns an iterator over regexes in the set which didn't match any + /// strings yet. 
+ pub fn unmatched_items(&self) -> impl Iterator { + self.items.iter().enumerate().filter_map(move |(i, item)| { + if !self.record_matches || self.matched[i].get() { + return None; + } + + Some(item) + }) + } + + /// Construct a RegexSet from the set of entries we've accumulated. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + pub fn build(&mut self, record_matches: bool) { + self.build_inner(record_matches, None) + } + + #[cfg(all(feature = "__cli", feature = "experimental"))] + /// Construct a RegexSet from the set of entries we've accumulated and emit diagnostics if the + /// name of the regex set is passed to it. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + pub fn build_with_diagnostics( + &mut self, + record_matches: bool, + name: Option<&'static str>, + ) { + self.build_inner(record_matches, name) + } + + #[cfg(all(not(feature = "__cli"), feature = "experimental"))] + /// Construct a RegexSet from the set of entries we've accumulated and emit diagnostics if the + /// name of the regex set is passed to it. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + pub(crate) fn build_with_diagnostics( + &mut self, + record_matches: bool, + name: Option<&'static str>, + ) { + self.build_inner(record_matches, name) + } + + fn build_inner( + &mut self, + record_matches: bool, + _name: Option<&'static str>, + ) { + let items = self.items.iter().map(|item| format!("^({})$", item)); + self.record_matches = record_matches; + self.set = match RxSet::new(items) { + Ok(x) => Some(x), + Err(e) => { + warn!("Invalid regex in {:?}: {:?}", self.items, e); + #[cfg(feature = "experimental")] + if let Some(name) = _name { + invalid_regex_warning(self, e, name); + } + None + } + } + } + + /// Does the given `string` match any of the regexes in this set? 
+ pub fn matches(&self, string: S) -> bool + where + S: AsRef, + { + let s = string.as_ref(); + let set = match self.set { + Some(ref set) => set, + None => return false, + }; + + if !self.record_matches { + return set.is_match(s); + } + + let matches = set.matches(s); + if !matches.matched_any() { + return false; + } + for i in matches.iter() { + self.matched[i].set(true); + } + + true + } +} + +#[cfg(feature = "experimental")] +fn invalid_regex_warning( + set: &RegexSet, + err: regex::Error, + name: &'static str, +) { + use crate::diagnostics::{Diagnostic, Level, Slice}; + + let mut diagnostic = Diagnostic::default(); + + match err { + regex::Error::Syntax(string) => { + if string.starts_with("regex parse error:\n") { + let mut source = String::new(); + + let mut parsing_source = true; + + for line in string.lines().skip(1) { + if parsing_source { + if line.starts_with(' ') { + source.push_str(line); + source.push('\n'); + continue; + } + parsing_source = false; + } + let error = "error: "; + if line.starts_with(error) { + let (_, msg) = line.split_at(error.len()); + diagnostic.add_annotation(msg.to_owned(), Level::Error); + } else { + diagnostic.add_annotation(line.to_owned(), Level::Info); + } + } + let mut slice = Slice::default(); + slice.with_source(source); + diagnostic.add_slice(slice); + + diagnostic.with_title( + "Error while parsing a regular expression.", + Level::Warn, + ); + } else { + diagnostic.with_title(string, Level::Warn); + } + } + err => { + let err = err.to_string(); + diagnostic.with_title(err, Level::Warn); + } + } + + diagnostic.add_annotation( + format!("This regular expression was passed via `{}`.", name), + Level::Note, + ); + + if set.items.iter().any(|item| item == "*") { + diagnostic.add_annotation("Wildcard patterns \"*\" are no longer considered valid. 
Use \".*\" instead.", Level::Help); + } + diagnostic.display(); +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/callbacks.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/callbacks.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/callbacks.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/callbacks.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -//! A public API for more fine-grained customization of bindgen behavior. - -pub use crate::ir::analysis::DeriveTrait; -pub use crate::ir::derive::CanDerive as ImplementsTrait; -pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue}; -pub use crate::ir::int::IntKind; -use std::fmt; -use std::panic::UnwindSafe; - -/// An enum to allow ignoring parsing of macros. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum MacroParsingBehavior { - /// Ignore the macro, generating no code for it, or anything that depends on - /// it. - Ignore, - /// The default behavior bindgen would have otherwise. - Default, -} - -impl Default for MacroParsingBehavior { - fn default() -> Self { - MacroParsingBehavior::Default - } -} - -/// A trait to allow configuring different kinds of types in different -/// situations. -pub trait ParseCallbacks: fmt::Debug + UnwindSafe { - /// This function will be run on every macro that is identified. - fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior { - MacroParsingBehavior::Default - } - - /// The integer kind an integer macro should have, given a name and the - /// value of that macro, or `None` if you want the default to be chosen. - fn int_macro(&self, _name: &str, _value: i64) -> Option { - None - } - - /// This will be run on every string macro. The callback cannot influence the further - /// treatment of the macro, but may use the value to generate additional code or configuration. 
- fn str_macro(&self, _name: &str, _value: &[u8]) {} - - /// This will be run on every function-like macro. The callback cannot - /// influence the further treatment of the macro, but may use the value to - /// generate additional code or configuration. - /// - /// The first parameter represents the name and argument list (including the - /// parentheses) of the function-like macro. The second parameter represents - /// the expansion of the macro as a sequence of tokens. - fn func_macro(&self, _name: &str, _value: &[&[u8]]) {} - - /// This function should return whether, given an enum variant - /// name, and value, this enum variant will forcibly be a constant. - fn enum_variant_behavior( - &self, - _enum_name: Option<&str>, - _original_variant_name: &str, - _variant_value: EnumVariantValue, - ) -> Option { - None - } - - /// Allows to rename an enum variant, replacing `_original_variant_name`. - fn enum_variant_name( - &self, - _enum_name: Option<&str>, - _original_variant_name: &str, - _variant_value: EnumVariantValue, - ) -> Option { - None - } - - /// Allows to rename an item, replacing `_original_item_name`. - fn item_name(&self, _original_item_name: &str) -> Option { - None - } - - /// This will be called on every file inclusion, with the full path of the included file. - fn include_file(&self, _filename: &str) {} - - /// This will be called to determine whether a particular blocklisted type - /// implements a trait or not. This will be used to implement traits on - /// other types containing the blocklisted type. 
- /// - /// * `None`: use the default behavior - /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait` - /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't - /// derive `_derive_trait` but can implemented it manually - /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement `_derive_trait` - fn blocklisted_type_implements_trait( - &self, - _name: &str, - _derive_trait: DeriveTrait, - ) -> Option { - None - } - - /// Provide a list of custom derive attributes. - /// - /// If no additional attributes are wanted, this function should return an - /// empty `Vec`. - fn add_derives(&self, _name: &str) -> Vec { - vec![] - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/clang.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/clang.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/clang.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/clang.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2093 +0,0 @@ -//! A higher level Clang API built on top of the generated bindings in the -//! `clang_sys` module. - -#![allow(non_upper_case_globals, dead_code)] - -use crate::ir::context::BindgenContext; -use clang_sys::*; -use std::ffi::{CStr, CString}; -use std::fmt; -use std::hash::Hash; -use std::hash::Hasher; -use std::os::raw::{c_char, c_int, c_longlong, c_uint, c_ulong, c_ulonglong}; -use std::{mem, ptr, slice}; - -/// A cursor into the Clang AST, pointing to an AST node. -/// -/// We call the AST node pointed to by the cursor the cursor's "referent". 
-#[derive(Copy, Clone)] -pub struct Cursor { - x: CXCursor, -} - -impl fmt::Debug for Cursor { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "Cursor({} kind: {}, loc: {}, usr: {:?})", - self.spelling(), - kind_to_str(self.kind()), - self.location(), - self.usr() - ) - } -} - -impl Cursor { - /// Get the Unified Symbol Resolution for this cursor's referent, if - /// available. - /// - /// The USR can be used to compare entities across translation units. - pub fn usr(&self) -> Option { - let s = unsafe { cxstring_into_string(clang_getCursorUSR(self.x)) }; - if s.is_empty() { - None - } else { - Some(s) - } - } - - /// Is this cursor's referent a declaration? - pub fn is_declaration(&self) -> bool { - unsafe { clang_isDeclaration(self.kind()) != 0 } - } - - /// Get this cursor's referent's spelling. - pub fn spelling(&self) -> String { - unsafe { cxstring_into_string(clang_getCursorSpelling(self.x)) } - } - - /// Get this cursor's referent's display name. - /// - /// This is not necessarily a valid identifier. It includes extra - /// information, such as parameters for a function, etc. - pub fn display_name(&self) -> String { - unsafe { cxstring_into_string(clang_getCursorDisplayName(self.x)) } - } - - /// Get the mangled name of this cursor's referent. - pub fn mangling(&self) -> String { - unsafe { cxstring_into_string(clang_Cursor_getMangling(self.x)) } - } - - /// Gets the C++ manglings for this cursor, or an error if the manglings - /// are not available. 
- pub fn cxx_manglings(&self) -> Result, ()> { - use clang_sys::*; - unsafe { - let manglings = clang_Cursor_getCXXManglings(self.x); - if manglings.is_null() { - return Err(()); - } - let count = (*manglings).Count as usize; - - let mut result = Vec::with_capacity(count); - for i in 0..count { - let string_ptr = (*manglings).Strings.add(i); - result.push(cxstring_to_string_leaky(*string_ptr)); - } - clang_disposeStringSet(manglings); - Ok(result) - } - } - - /// Returns whether the cursor refers to a built-in definition. - pub fn is_builtin(&self) -> bool { - let (file, _, _, _) = self.location().location(); - file.name().is_none() - } - - /// Get the `Cursor` for this cursor's referent's lexical parent. - /// - /// The lexical parent is the parent of the definition. The semantic parent - /// is the parent of the declaration. Generally, the lexical parent doesn't - /// have any effect on semantics, while the semantic parent does. - /// - /// In the following snippet, the `Foo` class would be the semantic parent - /// of the out-of-line `method` definition, while the lexical parent is the - /// translation unit. - /// - /// ```c++ - /// class Foo { - /// void method(); - /// }; - /// - /// void Foo::method() { /* ... */ } - /// ``` - pub fn lexical_parent(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getCursorLexicalParent(self.x), - } - } - } - - /// Get the referent's semantic parent, if one is available. - /// - /// See documentation for `lexical_parent` for details on semantic vs - /// lexical parents. - pub fn fallible_semantic_parent(&self) -> Option { - let sp = unsafe { - Cursor { - x: clang_getCursorSemanticParent(self.x), - } - }; - if sp == *self || !sp.is_valid() { - return None; - } - Some(sp) - } - - /// Get the referent's semantic parent. - /// - /// See documentation for `lexical_parent` for details on semantic vs - /// lexical parents. 
- pub fn semantic_parent(&self) -> Cursor { - self.fallible_semantic_parent().unwrap() - } - - /// Return the number of template arguments used by this cursor's referent, - /// if the referent is either a template instantiation. Returns `None` - /// otherwise. - /// - /// NOTE: This may not return `Some` for partial template specializations, - /// see #193 and #194. - pub fn num_template_args(&self) -> Option { - // XXX: `clang_Type_getNumTemplateArguments` is sort of reliable, while - // `clang_Cursor_getNumTemplateArguments` is totally unreliable. - // Therefore, try former first, and only fallback to the latter if we - // have to. - self.cur_type() - .num_template_args() - .or_else(|| { - let n: c_int = - unsafe { clang_Cursor_getNumTemplateArguments(self.x) }; - - if n >= 0 { - Some(n as u32) - } else { - debug_assert_eq!(n, -1); - None - } - }) - .or_else(|| { - let canonical = self.canonical(); - if canonical != *self { - canonical.num_template_args() - } else { - None - } - }) - } - - /// Get a cursor pointing to this referent's containing translation unit. - /// - /// Note that we shouldn't create a `TranslationUnit` struct here, because - /// bindgen assumes there will only be one of them alive at a time, and - /// disposes it on drop. That can change if this would be required, but I - /// think we can survive fine without it. - pub fn translation_unit(&self) -> Cursor { - assert!(self.is_valid()); - unsafe { - let tu = clang_Cursor_getTranslationUnit(self.x); - let cursor = Cursor { - x: clang_getTranslationUnitCursor(tu), - }; - assert!(cursor.is_valid()); - cursor - } - } - - /// Is the referent a top level construct? 
- pub fn is_toplevel(&self) -> bool { - let mut semantic_parent = self.fallible_semantic_parent(); - - while semantic_parent.is_some() && - (semantic_parent.unwrap().kind() == CXCursor_Namespace || - semantic_parent.unwrap().kind() == - CXCursor_NamespaceAlias || - semantic_parent.unwrap().kind() == CXCursor_NamespaceRef) - { - semantic_parent = - semantic_parent.unwrap().fallible_semantic_parent(); - } - - let tu = self.translation_unit(); - // Yes, this can happen with, e.g., macro definitions. - semantic_parent == tu.fallible_semantic_parent() - } - - /// There are a few kinds of types that we need to treat specially, mainly - /// not tracking the type declaration but the location of the cursor, given - /// clang doesn't expose a proper declaration for these types. - pub fn is_template_like(&self) -> bool { - matches!( - self.kind(), - CXCursor_ClassTemplate | - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_TypeAliasTemplateDecl - ) - } - - /// Is this Cursor pointing to a function-like macro definition? - pub fn is_macro_function_like(&self) -> bool { - unsafe { clang_Cursor_isMacroFunctionLike(self.x) != 0 } - } - - /// Get the kind of referent this cursor is pointing to. - pub fn kind(&self) -> CXCursorKind { - self.x.kind - } - - /// Returns true if the cursor is a definition - pub fn is_definition(&self) -> bool { - unsafe { clang_isCursorDefinition(self.x) != 0 } - } - - /// Is the referent a template specialization? - pub fn is_template_specialization(&self) -> bool { - self.specialized().is_some() - } - - /// Is the referent a fully specialized template specialization without any - /// remaining free template arguments? - pub fn is_fully_specialized_template(&self) -> bool { - self.is_template_specialization() && - self.kind() != CXCursor_ClassTemplatePartialSpecialization && - self.num_template_args().unwrap_or(0) > 0 - } - - /// Is the referent a template specialization that still has remaining free - /// template arguments? 
- pub fn is_in_non_fully_specialized_template(&self) -> bool { - if self.is_toplevel() { - return false; - } - - let parent = self.semantic_parent(); - if parent.is_fully_specialized_template() { - return false; - } - - if !parent.is_template_like() { - return parent.is_in_non_fully_specialized_template(); - } - - true - } - - /// Is this cursor pointing a valid referent? - pub fn is_valid(&self) -> bool { - unsafe { clang_isInvalid(self.kind()) == 0 } - } - - /// Get the source location for the referent. - pub fn location(&self) -> SourceLocation { - unsafe { - SourceLocation { - x: clang_getCursorLocation(self.x), - } - } - } - - /// Get the source location range for the referent. - pub fn extent(&self) -> CXSourceRange { - unsafe { clang_getCursorExtent(self.x) } - } - - /// Get the raw declaration comment for this referent, if one exists. - pub fn raw_comment(&self) -> Option { - let s = unsafe { - cxstring_into_string(clang_Cursor_getRawCommentText(self.x)) - }; - if s.is_empty() { - None - } else { - Some(s) - } - } - - /// Get the referent's parsed comment. - pub fn comment(&self) -> Comment { - unsafe { - Comment { - x: clang_Cursor_getParsedComment(self.x), - } - } - } - - /// Get the referent's type. - pub fn cur_type(&self) -> Type { - unsafe { - Type { - x: clang_getCursorType(self.x), - } - } - } - - /// Given that this cursor's referent is a reference to another type, or is - /// a declaration, get the cursor pointing to the referenced type or type of - /// the declared thing. - pub fn definition(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getCursorDefinition(self.x), - }; - - if ret.is_valid() && ret.kind() != CXCursor_NoDeclFound { - Some(ret) - } else { - None - } - } - } - - /// Given that this cursor's referent is reference type, get the cursor - /// pointing to the referenced type. 
- pub fn referenced(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getCursorReferenced(self.x), - }; - - if ret.is_valid() { - Some(ret) - } else { - None - } - } - } - - /// Get the canonical cursor for this referent. - /// - /// Many types can be declared multiple times before finally being properly - /// defined. This method allows us to get the canonical cursor for the - /// referent type. - pub fn canonical(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getCanonicalCursor(self.x), - } - } - } - - /// Given that this cursor points to either a template specialization or a - /// template instantiation, get a cursor pointing to the template definition - /// that is being specialized. - pub fn specialized(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getSpecializedCursorTemplate(self.x), - }; - if ret.is_valid() { - Some(ret) - } else { - None - } - } - } - - /// Assuming that this cursor's referent is a template declaration, get the - /// kind of cursor that would be generated for its specializations. - pub fn template_kind(&self) -> CXCursorKind { - unsafe { clang_getTemplateCursorKind(self.x) } - } - - /// Traverse this cursor's referent and its children. - /// - /// Call the given function on each AST node traversed. - pub fn visit(&self, mut visitor: Visitor) - where - Visitor: FnMut(Cursor) -> CXChildVisitResult, - { - let data = &mut visitor as *mut Visitor; - unsafe { - clang_visitChildren(self.x, visit_children::, data.cast()); - } - } - - /// Collect all of this cursor's children into a vec and return them. - pub fn collect_children(&self) -> Vec { - let mut children = vec![]; - self.visit(|c| { - children.push(c); - CXChildVisit_Continue - }); - children - } - - /// Does this cursor have any children? - pub fn has_children(&self) -> bool { - let mut has_children = false; - self.visit(|_| { - has_children = true; - CXChildVisit_Break - }); - has_children - } - - /// Does this cursor have at least `n` children? 
- pub fn has_at_least_num_children(&self, n: usize) -> bool { - assert!(n > 0); - let mut num_left = n; - self.visit(|_| { - num_left -= 1; - if num_left == 0 { - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - num_left == 0 - } - - /// Returns whether the given location contains a cursor with the given - /// kind in the first level of nesting underneath (doesn't look - /// recursively). - pub fn contains_cursor(&self, kind: CXCursorKind) -> bool { - let mut found = false; - - self.visit(|c| { - if c.kind() == kind { - found = true; - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - - found - } - - /// Is the referent an inlined function? - pub fn is_inlined_function(&self) -> bool { - unsafe { clang_Cursor_isFunctionInlined(self.x) != 0 } - } - - /// Is the referent a defaulted function? - pub fn is_defaulted_function(&self) -> bool { - unsafe { clang_CXXMethod_isDefaulted(self.x) != 0 } - } - - /// Is the referent a deleted function? - pub fn is_deleted_function(&self) -> bool { - // Unfortunately, libclang doesn't yet have an API for checking if a - // member function is deleted, but the following should be a good - // enough approximation. - // Deleted functions are implicitly inline according to paragraph 4 of - // [dcl.fct.def.delete] in the C++ standard. Normal inline functions - // have a definition in the same translation unit, so if this is an - // inline function without a definition, and it's not a defaulted - // function, we can reasonably safely conclude that it's a deleted - // function. - self.is_inlined_function() && - self.definition().is_none() && - !self.is_defaulted_function() - } - - /// Get the width of this cursor's referent bit field, or `None` if the - /// referent is not a bit field. 
- pub fn bit_width(&self) -> Option { - unsafe { - let w = clang_getFieldDeclBitWidth(self.x); - if w == -1 { - None - } else { - Some(w as u32) - } - } - } - - /// Get the integer representation type used to hold this cursor's referent - /// enum type. - pub fn enum_type(&self) -> Option { - unsafe { - let t = Type { - x: clang_getEnumDeclIntegerType(self.x), - }; - if t.is_valid() { - Some(t) - } else { - None - } - } - } - - /// Get the boolean constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub fn enum_val_boolean(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - Some(clang_getEnumConstantDeclValue(self.x) != 0) - } else { - None - } - } - } - - /// Get the signed constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub fn enum_val_signed(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - Some(clang_getEnumConstantDeclValue(self.x) as i64) - } else { - None - } - } - } - - /// Get the unsigned constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub fn enum_val_unsigned(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - Some(clang_getEnumConstantDeclUnsignedValue(self.x) as u64) - } else { - None - } - } - } - - /// Whether this cursor has the `warn_unused_result` attribute. - pub fn has_warn_unused_result_attr(&self) -> bool { - // FIXME(emilio): clang-sys doesn't expose this (from clang 9). - const CXCursor_WarnUnusedResultAttr: CXCursorKind = 440; - self.has_attr("warn_unused_result", Some(CXCursor_WarnUnusedResultAttr)) - } - - /// Does this cursor have the given attribute? - /// - /// `name` is checked against unexposed attributes. 
- fn has_attr(&self, name: &str, clang_kind: Option) -> bool { - let mut found_attr = false; - self.visit(|cur| { - let kind = cur.kind(); - found_attr = clang_kind.map_or(false, |k| k == kind) || - (kind == CXCursor_UnexposedAttr && - cur.tokens().iter().any(|t| { - t.kind == CXToken_Identifier && - t.spelling() == name.as_bytes() - })); - - if found_attr { - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - - found_attr - } - - /// Given that this cursor's referent is a `typedef`, get the `Type` that is - /// being aliased. - pub fn typedef_type(&self) -> Option { - let inner = Type { - x: unsafe { clang_getTypedefDeclUnderlyingType(self.x) }, - }; - - if inner.is_valid() { - Some(inner) - } else { - None - } - } - - /// Get the linkage kind for this cursor's referent. - /// - /// This only applies to functions and variables. - pub fn linkage(&self) -> CXLinkageKind { - unsafe { clang_getCursorLinkage(self.x) } - } - - /// Get the visibility of this cursor's referent. - pub fn visibility(&self) -> CXVisibilityKind { - unsafe { clang_getCursorVisibility(self.x) } - } - - /// Given that this cursor's referent is a function, return cursors to its - /// parameters. - /// - /// Returns None if the cursor's referent is not a function/method call or - /// declaration. - pub fn args(&self) -> Option> { - // match self.kind() { - // CXCursor_FunctionDecl | - // CXCursor_CXXMethod => { - self.num_args().ok().map(|num| { - (0..num) - .map(|i| Cursor { - x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) }, - }) - .collect() - }) - } - - /// Given that this cursor's referent is a function/method call or - /// declaration, return the number of arguments it takes. - /// - /// Returns Err if the cursor's referent is not a function/method call or - /// declaration. 
- pub fn num_args(&self) -> Result { - unsafe { - let w = clang_Cursor_getNumArguments(self.x); - if w == -1 { - Err(()) - } else { - Ok(w as u32) - } - } - } - - /// Get the access specifier for this cursor's referent. - pub fn access_specifier(&self) -> CX_CXXAccessSpecifier { - unsafe { clang_getCXXAccessSpecifier(self.x) } - } - - /// Is the cursor's referrent publically accessible in C++? - /// - /// Returns true if self.access_specifier() is `CX_CXXPublic` or - /// `CX_CXXInvalidAccessSpecifier`. - pub fn public_accessible(&self) -> bool { - let access = self.access_specifier(); - access == CX_CXXPublic || access == CX_CXXInvalidAccessSpecifier - } - - /// Is this cursor's referent a field declaration that is marked as - /// `mutable`? - pub fn is_mutable_field(&self) -> bool { - unsafe { clang_CXXField_isMutable(self.x) != 0 } - } - - /// Get the offset of the field represented by the Cursor. - pub fn offset_of_field(&self) -> Result { - let offset = unsafe { clang_Cursor_getOffsetOfField(self.x) }; - - if offset < 0 { - Err(LayoutError::from(offset as i32)) - } else { - Ok(offset as usize) - } - } - - /// Is this cursor's referent a member function that is declared `static`? - pub fn method_is_static(&self) -> bool { - unsafe { clang_CXXMethod_isStatic(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is declared `const`? - pub fn method_is_const(&self) -> bool { - unsafe { clang_CXXMethod_isConst(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is virtual? - pub fn method_is_virtual(&self) -> bool { - unsafe { clang_CXXMethod_isVirtual(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is pure virtual? - pub fn method_is_pure_virtual(&self) -> bool { - unsafe { clang_CXXMethod_isPureVirtual(self.x) != 0 } - } - - /// Is this cursor's referent a struct or class with virtual members? 
- pub fn is_virtual_base(&self) -> bool { - unsafe { clang_isVirtualBase(self.x) != 0 } - } - - /// Try to evaluate this cursor. - pub fn evaluate(&self) -> Option { - EvalResult::new(*self) - } - - /// Return the result type for this cursor - pub fn ret_type(&self) -> Option { - let rt = Type { - x: unsafe { clang_getCursorResultType(self.x) }, - }; - if rt.is_valid() { - Some(rt) - } else { - None - } - } - - /// Gets the tokens that correspond to that cursor. - pub fn tokens(&self) -> RawTokens { - RawTokens::new(self) - } - - /// Gets the tokens that correspond to that cursor as `cexpr` tokens. - pub fn cexpr_tokens(self) -> Vec { - self.tokens() - .iter() - .filter_map(|token| token.as_cexpr_token()) - .collect() - } - - /// Obtain the real path name of a cursor of InclusionDirective kind. - /// - /// Returns None if the cursor does not include a file, otherwise the file's full name - pub fn get_included_file_name(&self) -> Option { - let file = unsafe { clang_sys::clang_getIncludedFile(self.x) }; - if file.is_null() { - None - } else { - Some(unsafe { - cxstring_into_string(clang_sys::clang_getFileName(file)) - }) - } - } -} - -/// A struct that owns the tokenizer result from a given cursor. -pub struct RawTokens<'a> { - cursor: &'a Cursor, - tu: CXTranslationUnit, - tokens: *mut CXToken, - token_count: c_uint, -} - -impl<'a> RawTokens<'a> { - fn new(cursor: &'a Cursor) -> Self { - let mut tokens = ptr::null_mut(); - let mut token_count = 0; - let range = cursor.extent(); - let tu = unsafe { clang_Cursor_getTranslationUnit(cursor.x) }; - unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) }; - Self { - cursor, - tu, - tokens, - token_count, - } - } - - fn as_slice(&self) -> &[CXToken] { - if self.tokens.is_null() { - return &[]; - } - unsafe { slice::from_raw_parts(self.tokens, self.token_count as usize) } - } - - /// Get an iterator over these tokens. 
- pub fn iter(&self) -> ClangTokenIterator { - ClangTokenIterator { - tu: self.tu, - raw: self.as_slice().iter(), - } - } -} - -impl<'a> Drop for RawTokens<'a> { - fn drop(&mut self) { - if !self.tokens.is_null() { - unsafe { - clang_disposeTokens( - self.tu, - self.tokens, - self.token_count as c_uint, - ); - } - } - } -} - -/// A raw clang token, that exposes only kind, spelling, and extent. This is a -/// slightly more convenient version of `CXToken` which owns the spelling -/// string and extent. -#[derive(Debug)] -pub struct ClangToken { - spelling: CXString, - /// The extent of the token. This is the same as the relevant member from - /// `CXToken`. - pub extent: CXSourceRange, - /// The kind of the token. This is the same as the relevant member from - /// `CXToken`. - pub kind: CXTokenKind, -} - -impl ClangToken { - /// Get the token spelling, without being converted to utf-8. - pub fn spelling(&self) -> &[u8] { - let c_str = unsafe { - CStr::from_ptr(clang_getCString(self.spelling) as *const _) - }; - c_str.to_bytes() - } - - /// Converts a ClangToken to a `cexpr` token if possible. - pub fn as_cexpr_token(&self) -> Option { - use cexpr::token; - - let kind = match self.kind { - CXToken_Punctuation => token::Kind::Punctuation, - CXToken_Literal => token::Kind::Literal, - CXToken_Identifier => token::Kind::Identifier, - CXToken_Keyword => token::Kind::Keyword, - // NB: cexpr is not too happy about comments inside - // expressions, so we strip them down here. - CXToken_Comment => return None, - _ => { - warn!("Found unexpected token kind: {:?}", self); - return None; - } - }; - - Some(token::Token { - kind, - raw: self.spelling().to_vec().into_boxed_slice(), - }) - } -} - -impl Drop for ClangToken { - fn drop(&mut self) { - unsafe { clang_disposeString(self.spelling) } - } -} - -/// An iterator over a set of Tokens. 
-pub struct ClangTokenIterator<'a> { - tu: CXTranslationUnit, - raw: slice::Iter<'a, CXToken>, -} - -impl<'a> Iterator for ClangTokenIterator<'a> { - type Item = ClangToken; - - fn next(&mut self) -> Option { - let raw = self.raw.next()?; - unsafe { - let kind = clang_getTokenKind(*raw); - let spelling = clang_getTokenSpelling(self.tu, *raw); - let extent = clang_getTokenExtent(self.tu, *raw); - Some(ClangToken { - kind, - extent, - spelling, - }) - } - } -} - -/// Checks whether the name looks like an identifier, i.e. is alphanumeric -/// (including '_') and does not start with a digit. -pub fn is_valid_identifier(name: &str) -> bool { - let mut chars = name.chars(); - let first_valid = chars - .next() - .map(|c| c.is_alphabetic() || c == '_') - .unwrap_or(false); - - first_valid && chars.all(|c| c.is_alphanumeric() || c == '_') -} - -extern "C" fn visit_children( - cur: CXCursor, - _parent: CXCursor, - data: CXClientData, -) -> CXChildVisitResult -where - Visitor: FnMut(Cursor) -> CXChildVisitResult, -{ - let func: &mut Visitor = unsafe { &mut *(data as *mut Visitor) }; - let child = Cursor { x: cur }; - - (*func)(child) -} - -impl PartialEq for Cursor { - fn eq(&self, other: &Cursor) -> bool { - unsafe { clang_equalCursors(self.x, other.x) == 1 } - } -} - -impl Eq for Cursor {} - -impl Hash for Cursor { - fn hash(&self, state: &mut H) { - unsafe { clang_hashCursor(self.x) }.hash(state) - } -} - -/// The type of a node in clang's AST. 
-#[derive(Clone, Copy)] -pub struct Type { - x: CXType, -} - -impl PartialEq for Type { - fn eq(&self, other: &Self) -> bool { - unsafe { clang_equalTypes(self.x, other.x) != 0 } - } -} - -impl Eq for Type {} - -impl fmt::Debug for Type { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "Type({}, kind: {}, cconv: {}, decl: {:?}, canon: {:?})", - self.spelling(), - type_to_str(self.kind()), - self.call_conv(), - self.declaration(), - self.declaration().canonical() - ) - } -} - -/// An error about the layout of a struct, class, or type. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] -pub enum LayoutError { - /// Asked for the layout of an invalid type. - Invalid, - /// Asked for the layout of an incomplete type. - Incomplete, - /// Asked for the layout of a dependent type. - Dependent, - /// Asked for the layout of a type that does not have constant size. - NotConstantSize, - /// Asked for the layout of a field in a type that does not have such a - /// field. - InvalidFieldName, - /// An unknown layout error. - Unknown, -} - -impl ::std::convert::From for LayoutError { - fn from(val: i32) -> Self { - use self::LayoutError::*; - - match val { - CXTypeLayoutError_Invalid => Invalid, - CXTypeLayoutError_Incomplete => Incomplete, - CXTypeLayoutError_Dependent => Dependent, - CXTypeLayoutError_NotConstantSize => NotConstantSize, - CXTypeLayoutError_InvalidFieldName => InvalidFieldName, - _ => Unknown, - } - } -} - -impl Type { - /// Get this type's kind. - pub fn kind(&self) -> CXTypeKind { - self.x.kind - } - - /// Get a cursor pointing to this type's declaration. - pub fn declaration(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getTypeDeclaration(self.x), - } - } - } - - /// Get the canonical declaration of this type, if it is available. 
- pub fn canonical_declaration( - &self, - location: Option<&Cursor>, - ) -> Option { - let mut declaration = self.declaration(); - if !declaration.is_valid() { - if let Some(location) = location { - let mut location = *location; - if let Some(referenced) = location.referenced() { - location = referenced; - } - if location.is_template_like() { - declaration = location; - } - } - } - - let canonical = declaration.canonical(); - if canonical.is_valid() && canonical.kind() != CXCursor_NoDeclFound { - Some(CanonicalTypeDeclaration(*self, canonical)) - } else { - None - } - } - - /// Get a raw display name for this type. - pub fn spelling(&self) -> String { - let s = unsafe { cxstring_into_string(clang_getTypeSpelling(self.x)) }; - // Clang 5.0 introduced changes in the spelling API so it returned the - // full qualified name. Let's undo that here. - if s.split("::").all(is_valid_identifier) { - if let Some(s) = s.split("::").last() { - return s.to_owned(); - } - } - - s - } - - /// Is this type const qualified? 
- pub fn is_const(&self) -> bool { - unsafe { clang_isConstQualifiedType(self.x) != 0 } - } - - #[inline] - fn is_non_deductible_auto_type(&self) -> bool { - debug_assert_eq!(self.kind(), CXType_Auto); - self.canonical_type() == *self - } - - #[inline] - fn clang_size_of(&self, ctx: &BindgenContext) -> c_longlong { - match self.kind() { - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 - CXType_RValueReference | CXType_LValueReference => { - ctx.target_pointer_size() as c_longlong - } - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 - CXType_Auto if self.is_non_deductible_auto_type() => -6, - _ => unsafe { clang_Type_getSizeOf(self.x) }, - } - } - - #[inline] - fn clang_align_of(&self, ctx: &BindgenContext) -> c_longlong { - match self.kind() { - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 - CXType_RValueReference | CXType_LValueReference => { - ctx.target_pointer_size() as c_longlong - } - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 - CXType_Auto if self.is_non_deductible_auto_type() => -6, - _ => unsafe { clang_Type_getAlignOf(self.x) }, - } - } - - /// What is the size of this type? Paper over invalid types by returning `0` - /// for them. - pub fn size(&self, ctx: &BindgenContext) -> usize { - let val = self.clang_size_of(ctx); - if val < 0 { - 0 - } else { - val as usize - } - } - - /// What is the size of this type? - pub fn fallible_size( - &self, - ctx: &BindgenContext, - ) -> Result { - let val = self.clang_size_of(ctx); - if val < 0 { - Err(LayoutError::from(val as i32)) - } else { - Ok(val as usize) - } - } - - /// What is the alignment of this type? Paper over invalid types by - /// returning `0`. - pub fn align(&self, ctx: &BindgenContext) -> usize { - let val = self.clang_align_of(ctx); - if val < 0 { - 0 - } else { - val as usize - } - } - - /// What is the alignment of this type? 
- pub fn fallible_align( - &self, - ctx: &BindgenContext, - ) -> Result { - let val = self.clang_align_of(ctx); - if val < 0 { - Err(LayoutError::from(val as i32)) - } else { - Ok(val as usize) - } - } - - /// Get the layout for this type, or an error describing why it does not - /// have a valid layout. - pub fn fallible_layout( - &self, - ctx: &BindgenContext, - ) -> Result { - use crate::ir::layout::Layout; - let size = self.fallible_size(ctx)?; - let align = self.fallible_align(ctx)?; - Ok(Layout::new(size, align)) - } - - /// Get the number of template arguments this type has, or `None` if it is - /// not some kind of template. - pub fn num_template_args(&self) -> Option { - let n = unsafe { clang_Type_getNumTemplateArguments(self.x) }; - if n >= 0 { - Some(n as u32) - } else { - debug_assert_eq!(n, -1); - None - } - } - - /// If this type is a class template specialization, return its - /// template arguments. Otherwise, return None. - pub fn template_args(&self) -> Option { - self.num_template_args().map(|n| TypeTemplateArgIterator { - x: self.x, - length: n, - index: 0, - }) - } - - /// Given that this type is a function prototype, return the types of its parameters. - /// - /// Returns None if the type is not a function prototype. - pub fn args(&self) -> Option> { - self.num_args().ok().map(|num| { - (0..num) - .map(|i| Type { - x: unsafe { clang_getArgType(self.x, i as c_uint) }, - }) - .collect() - }) - } - - /// Given that this type is a function prototype, return the number of arguments it takes. - /// - /// Returns Err if the type is not a function prototype. - pub fn num_args(&self) -> Result { - unsafe { - let w = clang_getNumArgTypes(self.x); - if w == -1 { - Err(()) - } else { - Ok(w as u32) - } - } - } - - /// Given that this type is a pointer type, return the type that it points - /// to. 
- pub fn pointee_type(&self) -> Option { - match self.kind() { - CXType_Pointer | - CXType_RValueReference | - CXType_LValueReference | - CXType_MemberPointer | - CXType_BlockPointer | - CXType_ObjCObjectPointer => { - let ret = Type { - x: unsafe { clang_getPointeeType(self.x) }, - }; - debug_assert!(ret.is_valid()); - Some(ret) - } - _ => None, - } - } - - /// Given that this type is an array, vector, or complex type, return the - /// type of its elements. - pub fn elem_type(&self) -> Option { - let current_type = Type { - x: unsafe { clang_getElementType(self.x) }, - }; - if current_type.is_valid() { - Some(current_type) - } else { - None - } - } - - /// Given that this type is an array or vector type, return its number of - /// elements. - pub fn num_elements(&self) -> Option { - let num_elements_returned = unsafe { clang_getNumElements(self.x) }; - if num_elements_returned != -1 { - Some(num_elements_returned as usize) - } else { - None - } - } - - /// Get the canonical version of this type. This sees through `typedef`s and - /// aliases to get the underlying, canonical type. - pub fn canonical_type(&self) -> Type { - unsafe { - Type { - x: clang_getCanonicalType(self.x), - } - } - } - - /// Is this type a variadic function type? - pub fn is_variadic(&self) -> bool { - unsafe { clang_isFunctionTypeVariadic(self.x) != 0 } - } - - /// Given that this type is a function type, get the type of its return - /// value. - pub fn ret_type(&self) -> Option { - let rt = Type { - x: unsafe { clang_getResultType(self.x) }, - }; - if rt.is_valid() { - Some(rt) - } else { - None - } - } - - /// Given that this type is a function type, get its calling convention. If - /// this is not a function type, `CXCallingConv_Invalid` is returned. 
- pub fn call_conv(&self) -> CXCallingConv { - unsafe { clang_getFunctionTypeCallingConv(self.x) } - } - - /// For elaborated types (types which use `class`, `struct`, or `union` to - /// disambiguate types from local bindings), get the underlying type. - pub fn named(&self) -> Type { - unsafe { - Type { - x: clang_Type_getNamedType(self.x), - } - } - } - - /// Is this a valid type? - pub fn is_valid(&self) -> bool { - self.kind() != CXType_Invalid - } - - /// Is this a valid and exposed type? - pub fn is_valid_and_exposed(&self) -> bool { - self.is_valid() && self.kind() != CXType_Unexposed - } - - /// Is this type a fully instantiated template? - pub fn is_fully_instantiated_template(&self) -> bool { - // Yep, the spelling of this containing type-parameter is extremely - // nasty... But can happen in . Unfortunately I couldn't - // reduce it enough :( - self.template_args().map_or(false, |args| args.len() > 0) && - !matches!( - self.declaration().kind(), - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_TypeAliasTemplateDecl | - CXCursor_TemplateTemplateParameter - ) - } - - /// Is this type an associated template type? Eg `T::Associated` in - /// this example: - /// - /// ```c++ - /// template - /// class Foo { - /// typename T::Associated member; - /// }; - /// ``` - pub fn is_associated_type(&self) -> bool { - // This is terrible :( - fn hacky_parse_associated_type>(spelling: S) -> bool { - lazy_static! { - static ref ASSOC_TYPE_RE: regex::Regex = regex::Regex::new( - r"typename type\-parameter\-\d+\-\d+::.+" - ) - .unwrap(); - } - ASSOC_TYPE_RE.is_match(spelling.as_ref()) - } - - self.kind() == CXType_Unexposed && - (hacky_parse_associated_type(self.spelling()) || - hacky_parse_associated_type( - self.canonical_type().spelling(), - )) - } -} - -/// The `CanonicalTypeDeclaration` type exists as proof-by-construction that its -/// cursor is the canonical declaration for its type. 
If you have a -/// `CanonicalTypeDeclaration` instance, you know for sure that the type and -/// cursor match up in a canonical declaration relationship, and it simply -/// cannot be otherwise. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct CanonicalTypeDeclaration(Type, Cursor); - -impl CanonicalTypeDeclaration { - /// Get the type. - pub fn ty(&self) -> &Type { - &self.0 - } - - /// Get the type's canonical declaration cursor. - pub fn cursor(&self) -> &Cursor { - &self.1 - } -} - -/// An iterator for a type's template arguments. -pub struct TypeTemplateArgIterator { - x: CXType, - length: u32, - index: u32, -} - -impl Iterator for TypeTemplateArgIterator { - type Item = Type; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index as c_uint; - self.index += 1; - Some(Type { - x: unsafe { clang_Type_getTemplateArgumentAsType(self.x, idx) }, - }) - } else { - None - } - } -} - -impl ExactSizeIterator for TypeTemplateArgIterator { - fn len(&self) -> usize { - assert!(self.index <= self.length); - (self.length - self.index) as usize - } -} - -/// A `SourceLocation` is a file, line, column, and byte offset location for -/// some source text. -pub struct SourceLocation { - x: CXSourceLocation, -} - -impl SourceLocation { - /// Get the (file, line, column, byte offset) tuple for this source - /// location. 
- pub fn location(&self) -> (File, usize, usize, usize) { - unsafe { - let mut file = mem::zeroed(); - let mut line = 0; - let mut col = 0; - let mut off = 0; - clang_getSpellingLocation( - self.x, &mut file, &mut line, &mut col, &mut off, - ); - (File { x: file }, line as usize, col as usize, off as usize) - } - } -} - -impl fmt::Display for SourceLocation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (file, line, col, _) = self.location(); - if let Some(name) = file.name() { - write!(f, "{}:{}:{}", name, line, col) - } else { - "builtin definitions".fmt(f) - } - } -} - -impl fmt::Debug for SourceLocation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -/// A comment in the source text. -/// -/// Comments are sort of parsed by Clang, and have a tree structure. -pub struct Comment { - x: CXComment, -} - -impl Comment { - /// What kind of comment is this? - pub fn kind(&self) -> CXCommentKind { - unsafe { clang_Comment_getKind(self.x) } - } - - /// Get this comment's children comment - pub fn get_children(&self) -> CommentChildrenIterator { - CommentChildrenIterator { - parent: self.x, - length: unsafe { clang_Comment_getNumChildren(self.x) }, - index: 0, - } - } - - /// Given that this comment is the start or end of an HTML tag, get its tag - /// name. - pub fn get_tag_name(&self) -> String { - unsafe { cxstring_into_string(clang_HTMLTagComment_getTagName(self.x)) } - } - - /// Given that this comment is an HTML start tag, get its attributes. 
- pub fn get_tag_attrs(&self) -> CommentAttributesIterator { - CommentAttributesIterator { - x: self.x, - length: unsafe { clang_HTMLStartTag_getNumAttrs(self.x) }, - index: 0, - } - } -} - -/// An iterator for a comment's children -pub struct CommentChildrenIterator { - parent: CXComment, - length: c_uint, - index: c_uint, -} - -impl Iterator for CommentChildrenIterator { - type Item = Comment; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index; - self.index += 1; - Some(Comment { - x: unsafe { clang_Comment_getChild(self.parent, idx) }, - }) - } else { - None - } - } -} - -/// An HTML start tag comment attribute -pub struct CommentAttribute { - /// HTML start tag attribute name - pub name: String, - /// HTML start tag attribute value - pub value: String, -} - -/// An iterator for a comment's attributes -pub struct CommentAttributesIterator { - x: CXComment, - length: c_uint, - index: c_uint, -} - -impl Iterator for CommentAttributesIterator { - type Item = CommentAttribute; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index; - self.index += 1; - Some(CommentAttribute { - name: unsafe { - cxstring_into_string(clang_HTMLStartTag_getAttrName( - self.x, idx, - )) - }, - value: unsafe { - cxstring_into_string(clang_HTMLStartTag_getAttrValue( - self.x, idx, - )) - }, - }) - } else { - None - } - } -} - -/// A source file. -pub struct File { - x: CXFile, -} - -impl File { - /// Get the name of this source file. 
- pub fn name(&self) -> Option { - if self.x.is_null() { - return None; - } - Some(unsafe { cxstring_into_string(clang_getFileName(self.x)) }) - } -} - -fn cxstring_to_string_leaky(s: CXString) -> String { - if s.data.is_null() { - return "".to_owned(); - } - let c_str = unsafe { CStr::from_ptr(clang_getCString(s) as *const _) }; - c_str.to_string_lossy().into_owned() -} - -fn cxstring_into_string(s: CXString) -> String { - let ret = cxstring_to_string_leaky(s); - unsafe { clang_disposeString(s) }; - ret -} - -/// An `Index` is an environment for a set of translation units that will -/// typically end up linked together in one final binary. -pub struct Index { - x: CXIndex, -} - -impl Index { - /// Construct a new `Index`. - /// - /// The `pch` parameter controls whether declarations in pre-compiled - /// headers are included when enumerating a translation unit's "locals". - /// - /// The `diag` parameter controls whether debugging diagnostics are enabled. - pub fn new(pch: bool, diag: bool) -> Index { - unsafe { - Index { - x: clang_createIndex(pch as c_int, diag as c_int), - } - } - } -} - -impl fmt::Debug for Index { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Index {{ }}") - } -} - -impl Drop for Index { - fn drop(&mut self) { - unsafe { - clang_disposeIndex(self.x); - } - } -} - -/// A translation unit (or "compilation unit"). -pub struct TranslationUnit { - x: CXTranslationUnit, -} - -impl fmt::Debug for TranslationUnit { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "TranslationUnit {{ }}") - } -} - -impl TranslationUnit { - /// Parse a source file into a translation unit. 
- pub fn parse( - ix: &Index, - file: &str, - cmd_args: &[String], - unsaved: &[UnsavedFile], - opts: CXTranslationUnit_Flags, - ) -> Option { - let fname = CString::new(file).unwrap(); - let _c_args: Vec = cmd_args - .iter() - .map(|s| CString::new(s.clone()).unwrap()) - .collect(); - let c_args: Vec<*const c_char> = - _c_args.iter().map(|s| s.as_ptr()).collect(); - let mut c_unsaved: Vec = - unsaved.iter().map(|f| f.x).collect(); - let tu = unsafe { - clang_parseTranslationUnit( - ix.x, - fname.as_ptr(), - c_args.as_ptr(), - c_args.len() as c_int, - c_unsaved.as_mut_ptr(), - c_unsaved.len() as c_uint, - opts, - ) - }; - if tu.is_null() { - None - } else { - Some(TranslationUnit { x: tu }) - } - } - - /// Get the Clang diagnostic information associated with this translation - /// unit. - pub fn diags(&self) -> Vec { - unsafe { - let num = clang_getNumDiagnostics(self.x) as usize; - let mut diags = vec![]; - for i in 0..num { - diags.push(Diagnostic { - x: clang_getDiagnostic(self.x, i as c_uint), - }); - } - diags - } - } - - /// Get a cursor pointing to the root of this translation unit's AST. - pub fn cursor(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getTranslationUnitCursor(self.x), - } - } - } - - /// Is this the null translation unit? - pub fn is_null(&self) -> bool { - self.x.is_null() - } -} - -impl Drop for TranslationUnit { - fn drop(&mut self) { - unsafe { - clang_disposeTranslationUnit(self.x); - } - } -} - -/// A diagnostic message generated while parsing a translation unit. -pub struct Diagnostic { - x: CXDiagnostic, -} - -impl Diagnostic { - /// Format this diagnostic message as a string, using the given option bit - /// flags. - pub fn format(&self) -> String { - unsafe { - let opts = clang_defaultDiagnosticDisplayOptions(); - cxstring_into_string(clang_formatDiagnostic(self.x, opts)) - } - } - - /// What is the severity of this diagnostic message? 
- pub fn severity(&self) -> CXDiagnosticSeverity { - unsafe { clang_getDiagnosticSeverity(self.x) } - } -} - -impl Drop for Diagnostic { - /// Destroy this diagnostic message. - fn drop(&mut self) { - unsafe { - clang_disposeDiagnostic(self.x); - } - } -} - -/// A file which has not been saved to disk. -pub struct UnsavedFile { - x: CXUnsavedFile, - /// The name of the unsaved file. Kept here to avoid leaving dangling pointers in - /// `CXUnsavedFile`. - pub name: CString, - contents: CString, -} - -impl UnsavedFile { - /// Construct a new unsaved file with the given `name` and `contents`. - pub fn new(name: &str, contents: &str) -> UnsavedFile { - let name = CString::new(name).unwrap(); - let contents = CString::new(contents).unwrap(); - let x = CXUnsavedFile { - Filename: name.as_ptr(), - Contents: contents.as_ptr(), - Length: contents.as_bytes().len() as c_ulong, - }; - UnsavedFile { x, name, contents } - } -} - -impl fmt::Debug for UnsavedFile { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "UnsavedFile(name: {:?}, contents: {:?})", - self.name, self.contents - ) - } -} - -/// Convert a cursor kind into a static string. -pub fn kind_to_str(x: CXCursorKind) -> String { - unsafe { cxstring_into_string(clang_getCursorKindSpelling(x)) } -} - -/// Convert a type kind to a static string. -pub fn type_to_str(x: CXTypeKind) -> String { - unsafe { cxstring_into_string(clang_getTypeKindSpelling(x)) } -} - -/// Dump the Clang AST to stdout for debugging purposes. 
-pub fn ast_dump(c: &Cursor, depth: isize) -> CXChildVisitResult { - fn print_indent>(depth: isize, s: S) { - for _ in 0..depth { - print!(" "); - } - println!("{}", s.as_ref()); - } - - fn print_cursor>(depth: isize, prefix: S, c: &Cursor) { - let prefix = prefix.as_ref(); - print_indent( - depth, - format!(" {}kind = {}", prefix, kind_to_str(c.kind())), - ); - print_indent( - depth, - format!(" {}spelling = \"{}\"", prefix, c.spelling()), - ); - print_indent(depth, format!(" {}location = {}", prefix, c.location())); - print_indent( - depth, - format!(" {}is-definition? {}", prefix, c.is_definition()), - ); - print_indent( - depth, - format!(" {}is-declaration? {}", prefix, c.is_declaration()), - ); - print_indent( - depth, - format!( - " {}is-inlined-function? {}", - prefix, - c.is_inlined_function() - ), - ); - - let templ_kind = c.template_kind(); - if templ_kind != CXCursor_NoDeclFound { - print_indent( - depth, - format!( - " {}template-kind = {}", - prefix, - kind_to_str(templ_kind) - ), - ); - } - if let Some(usr) = c.usr() { - print_indent(depth, format!(" {}usr = \"{}\"", prefix, usr)); - } - if let Ok(num) = c.num_args() { - print_indent(depth, format!(" {}number-of-args = {}", prefix, num)); - } - if let Some(num) = c.num_template_args() { - print_indent( - depth, - format!(" {}number-of-template-args = {}", prefix, num), - ); - } - if let Some(width) = c.bit_width() { - print_indent(depth, format!(" {}bit-width = {}", prefix, width)); - } - if let Some(ty) = c.enum_type() { - print_indent( - depth, - format!(" {}enum-type = {}", prefix, type_to_str(ty.kind())), - ); - } - if let Some(val) = c.enum_val_signed() { - print_indent(depth, format!(" {}enum-val = {}", prefix, val)); - } - if let Some(ty) = c.typedef_type() { - print_indent( - depth, - format!(" {}typedef-type = {}", prefix, type_to_str(ty.kind())), - ); - } - if let Some(ty) = c.ret_type() { - print_indent( - depth, - format!(" {}ret-type = {}", prefix, type_to_str(ty.kind())), - ); - } - - 
if let Some(refd) = c.referenced() { - if refd != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "referenced.", - &refd, - ); - } - } - - let canonical = c.canonical(); - if canonical != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "canonical.", - &canonical, - ); - } - - if let Some(specialized) = c.specialized() { - if specialized != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "specialized.", - &specialized, - ); - } - } - - if let Some(parent) = c.fallible_semantic_parent() { - println!(); - print_cursor( - depth, - String::from(prefix) + "semantic-parent.", - &parent, - ); - } - } - - fn print_type>(depth: isize, prefix: S, ty: &Type) { - let prefix = prefix.as_ref(); - - let kind = ty.kind(); - print_indent(depth, format!(" {}kind = {}", prefix, type_to_str(kind))); - if kind == CXType_Invalid { - return; - } - - print_indent(depth, format!(" {}cconv = {}", prefix, ty.call_conv())); - - print_indent( - depth, - format!(" {}spelling = \"{}\"", prefix, ty.spelling()), - ); - let num_template_args = - unsafe { clang_Type_getNumTemplateArguments(ty.x) }; - if num_template_args >= 0 { - print_indent( - depth, - format!( - " {}number-of-template-args = {}", - prefix, num_template_args - ), - ); - } - if let Some(num) = ty.num_elements() { - print_indent( - depth, - format!(" {}number-of-elements = {}", prefix, num), - ); - } - print_indent( - depth, - format!(" {}is-variadic? 
{}", prefix, ty.is_variadic()), - ); - - let canonical = ty.canonical_type(); - if canonical != *ty { - println!(); - print_type(depth, String::from(prefix) + "canonical.", &canonical); - } - - if let Some(pointee) = ty.pointee_type() { - if pointee != *ty { - println!(); - print_type(depth, String::from(prefix) + "pointee.", &pointee); - } - } - - if let Some(elem) = ty.elem_type() { - if elem != *ty { - println!(); - print_type(depth, String::from(prefix) + "elements.", &elem); - } - } - - if let Some(ret) = ty.ret_type() { - if ret != *ty { - println!(); - print_type(depth, String::from(prefix) + "return.", &ret); - } - } - - let named = ty.named(); - if named != *ty && named.is_valid() { - println!(); - print_type(depth, String::from(prefix) + "named.", &named); - } - } - - print_indent(depth, "("); - print_cursor(depth, "", c); - - println!(); - let ty = c.cur_type(); - print_type(depth, "type.", &ty); - - let declaration = ty.declaration(); - if declaration != *c && declaration.kind() != CXCursor_NoDeclFound { - println!(); - print_cursor(depth, "type.declaration.", &declaration); - } - - // Recurse. - let mut found_children = false; - c.visit(|s| { - if !found_children { - println!(); - found_children = true; - } - ast_dump(&s, depth + 1) - }); - - print_indent(depth, ")"); - - CXChildVisit_Continue -} - -/// Try to extract the clang version to a string -pub fn extract_clang_version() -> String { - unsafe { cxstring_into_string(clang_getClangVersion()) } -} - -/// A wrapper for the result of evaluating an expression. -#[derive(Debug)] -pub struct EvalResult { - x: CXEvalResult, -} - -impl EvalResult { - /// Evaluate `cursor` and return the result. 
- pub fn new(cursor: Cursor) -> Option { - // Work around https://bugs.llvm.org/show_bug.cgi?id=42532, see: - // * https://github.com/rust-lang/rust-bindgen/issues/283 - // * https://github.com/rust-lang/rust-bindgen/issues/1590 - { - let mut found_cant_eval = false; - cursor.visit(|c| { - if c.kind() == CXCursor_TypeRef && - c.cur_type().canonical_type().kind() == CXType_Unexposed - { - found_cant_eval = true; - return CXChildVisit_Break; - } - - CXChildVisit_Recurse - }); - - if found_cant_eval { - return None; - } - } - Some(EvalResult { - x: unsafe { clang_Cursor_Evaluate(cursor.x) }, - }) - } - - fn kind(&self) -> CXEvalResultKind { - unsafe { clang_EvalResult_getKind(self.x) } - } - - /// Try to get back the result as a double. - pub fn as_double(&self) -> Option { - match self.kind() { - CXEval_Float => { - Some(unsafe { clang_EvalResult_getAsDouble(self.x) } as f64) - } - _ => None, - } - } - - /// Try to get back the result as an integer. - pub fn as_int(&self) -> Option { - if self.kind() != CXEval_Int { - return None; - } - - if !clang_EvalResult_isUnsignedInt::is_loaded() { - // FIXME(emilio): There's no way to detect underflow here, and clang - // will just happily give us a value. - return Some(unsafe { clang_EvalResult_getAsInt(self.x) } as i64); - } - - if unsafe { clang_EvalResult_isUnsignedInt(self.x) } != 0 { - let value = unsafe { clang_EvalResult_getAsUnsigned(self.x) }; - if value > i64::max_value() as c_ulonglong { - return None; - } - - return Some(value as i64); - } - - let value = unsafe { clang_EvalResult_getAsLongLong(self.x) }; - if value > i64::max_value() as c_longlong { - return None; - } - if value < i64::min_value() as c_longlong { - return None; - } - Some(value as i64) - } - - /// Evaluates the expression as a literal string, that may or may not be - /// valid utf-8. 
- pub fn as_literal_string(&self) -> Option> { - match self.kind() { - CXEval_StrLiteral => { - let ret = unsafe { - CStr::from_ptr(clang_EvalResult_getAsStr(self.x)) - }; - Some(ret.to_bytes().to_vec()) - } - _ => None, - } - } -} - -impl Drop for EvalResult { - fn drop(&mut self) { - unsafe { clang_EvalResult_dispose(self.x) }; - } -} - -/// Target information obtained from libclang. -#[derive(Debug)] -pub struct TargetInfo { - /// The target triple. - pub triple: String, - /// The width of the pointer _in bits_. - pub pointer_width: usize, -} - -impl TargetInfo { - /// Tries to obtain target information from libclang. - pub fn new(tu: &TranslationUnit) -> Option { - if !clang_getTranslationUnitTargetInfo::is_loaded() { - return None; - } - let triple; - let pointer_width; - unsafe { - let ti = clang_getTranslationUnitTargetInfo(tu.x); - triple = cxstring_into_string(clang_TargetInfo_getTriple(ti)); - pointer_width = clang_TargetInfo_getPointerWidth(ti); - clang_TargetInfo_dispose(ti); - } - assert!(pointer_width > 0); - assert_eq!(pointer_width % 8, 0); - Some(TargetInfo { - triple, - pointer_width: pointer_width as usize, - }) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct __BindgenBitfieldUnit { - storage: Storage, -} - -impl __BindgenBitfieldUnit { - #[inline] - pub const fn new(storage: Storage) -> Self { - Self { storage } - } -} - -impl __BindgenBitfieldUnit -where - Storage: AsRef<[u8]> + AsMut<[u8]>, -{ - #[inline] - pub fn get_bit(&self, index: usize) -> 
bool { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = self.storage.as_ref()[byte_index]; - - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - - byte & mask == mask - } - - #[inline] - pub fn set_bit(&mut self, index: usize, val: bool) { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = &mut self.storage.as_mut()[byte_index]; - - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - if val { - *byte |= mask; - } else { - *byte &= !mask; - } - } - - #[inline] - pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - let mut val = 0; - - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - - val - } - - #[inline] - pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit_tests.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit_tests.rs --- 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit_tests.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/bitfield_unit_tests.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,260 +0,0 @@ -//! Tests for `__BindgenBitfieldUnit`. -//! -//! Note that bit-fields are allocated right to left (least to most significant -//! bits). -//! -//! From the x86 PS ABI: -//! -//! ```c -//! struct { -//! int j : 5; -//! int k : 6; -//! int m : 7; -//! }; -//! ``` -//! -//! ```ignore -//! +------------------------------------------------------------+ -//! | | | | | -//! | padding | m | k | j | -//! |31 18|17 11|10 5|4 0| -//! +------------------------------------------------------------+ -//! ``` - -use super::bitfield_unit::__BindgenBitfieldUnit; - -#[test] -fn bitfield_unit_get_bit() { - let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]); - - let mut bits = vec![]; - for i in 0..16 { - bits.push(unit.get_bit(i)); - } - - println!(); - println!("bits = {:?}", bits); - assert_eq!( - bits, - &[ - // 0b10011101 - true, false, true, true, true, false, false, true, - // 0b00011101 - true, false, true, true, true, false, false, false - ] - ); -} - -#[test] -fn bitfield_unit_set_bit() { - let mut unit = - __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]); - - for i in 0..16 { - if i % 3 == 0 { - unit.set_bit(i, true); - } - } - - for i in 0..16 { - assert_eq!(unit.get_bit(i), i % 3 == 0); - } - - let mut unit = - __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]); - - for i in 0..16 { - if i % 3 == 0 { - unit.set_bit(i, false); - } - } - - for i in 0..16 { - assert_eq!(unit.get_bit(i), i % 3 != 0); - } -} - -macro_rules! 
bitfield_unit_get { - ( - $( - With $storage:expr , then get($start:expr, $len:expr) is $expected:expr; - )* - ) => { - #[test] - fn bitfield_unit_get() { - $({ - let expected = $expected; - let unit = __BindgenBitfieldUnit::<_>::new($storage); - let actual = unit.get($start, $len); - - println!(); - println!("expected = {:064b}", expected); - println!("actual = {:064b}", actual); - - assert_eq!(expected, actual); - })* - } - } -} - -bitfield_unit_get! { - // Let's just exhaustively test getting the bits from a single byte, since - // there are few enough combinations... - - With [0b11100010], then get(0, 1) is 0; - With [0b11100010], then get(1, 1) is 1; - With [0b11100010], then get(2, 1) is 0; - With [0b11100010], then get(3, 1) is 0; - With [0b11100010], then get(4, 1) is 0; - With [0b11100010], then get(5, 1) is 1; - With [0b11100010], then get(6, 1) is 1; - With [0b11100010], then get(7, 1) is 1; - - With [0b11100010], then get(0, 2) is 0b10; - With [0b11100010], then get(1, 2) is 0b01; - With [0b11100010], then get(2, 2) is 0b00; - With [0b11100010], then get(3, 2) is 0b00; - With [0b11100010], then get(4, 2) is 0b10; - With [0b11100010], then get(5, 2) is 0b11; - With [0b11100010], then get(6, 2) is 0b11; - - With [0b11100010], then get(0, 3) is 0b010; - With [0b11100010], then get(1, 3) is 0b001; - With [0b11100010], then get(2, 3) is 0b000; - With [0b11100010], then get(3, 3) is 0b100; - With [0b11100010], then get(4, 3) is 0b110; - With [0b11100010], then get(5, 3) is 0b111; - - With [0b11100010], then get(0, 4) is 0b0010; - With [0b11100010], then get(1, 4) is 0b0001; - With [0b11100010], then get(2, 4) is 0b1000; - With [0b11100010], then get(3, 4) is 0b1100; - With [0b11100010], then get(4, 4) is 0b1110; - - With [0b11100010], then get(0, 5) is 0b00010; - With [0b11100010], then get(1, 5) is 0b10001; - With [0b11100010], then get(2, 5) is 0b11000; - With [0b11100010], then get(3, 5) is 0b11100; - - With [0b11100010], then get(0, 6) is 0b100010; - With 
[0b11100010], then get(1, 6) is 0b110001; - With [0b11100010], then get(2, 6) is 0b111000; - - With [0b11100010], then get(0, 7) is 0b1100010; - With [0b11100010], then get(1, 7) is 0b1110001; - - With [0b11100010], then get(0, 8) is 0b11100010; - - // OK. Now let's test getting bits from across byte boundaries. - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(0, 16) is 0b1111111101010101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(1, 16) is 0b0111111110101010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(2, 16) is 0b0011111111010101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(3, 16) is 0b0001111111101010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(4, 16) is 0b0000111111110101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(5, 16) is 0b0000011111111010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(6, 16) is 0b0000001111111101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(7, 16) is 0b0000000111111110; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(8, 16) is 0b0000000011111111; -} - -macro_rules! bitfield_unit_set { - ( - $( - set($start:expr, $len:expr, $val:expr) is $expected:expr; - )* - ) => { - #[test] - fn bitfield_unit_set() { - $( - let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]); - unit.set($start, $len, $val); - let actual = unit.get(0, 32); - - println!(); - println!("set({}, {}, {:032b}", $start, $len, $val); - println!("expected = {:064b}", $expected); - println!("actual = {:064b}", actual); - - assert_eq!($expected, actual); - )* - } - } -} - -bitfield_unit_set! { - // Once again, let's exhaustively test single byte combinations. 
- - set(0, 1, 0b11111111) is 0b00000001; - set(1, 1, 0b11111111) is 0b00000010; - set(2, 1, 0b11111111) is 0b00000100; - set(3, 1, 0b11111111) is 0b00001000; - set(4, 1, 0b11111111) is 0b00010000; - set(5, 1, 0b11111111) is 0b00100000; - set(6, 1, 0b11111111) is 0b01000000; - set(7, 1, 0b11111111) is 0b10000000; - - set(0, 2, 0b11111111) is 0b00000011; - set(1, 2, 0b11111111) is 0b00000110; - set(2, 2, 0b11111111) is 0b00001100; - set(3, 2, 0b11111111) is 0b00011000; - set(4, 2, 0b11111111) is 0b00110000; - set(5, 2, 0b11111111) is 0b01100000; - set(6, 2, 0b11111111) is 0b11000000; - - set(0, 3, 0b11111111) is 0b00000111; - set(1, 3, 0b11111111) is 0b00001110; - set(2, 3, 0b11111111) is 0b00011100; - set(3, 3, 0b11111111) is 0b00111000; - set(4, 3, 0b11111111) is 0b01110000; - set(5, 3, 0b11111111) is 0b11100000; - - set(0, 4, 0b11111111) is 0b00001111; - set(1, 4, 0b11111111) is 0b00011110; - set(2, 4, 0b11111111) is 0b00111100; - set(3, 4, 0b11111111) is 0b01111000; - set(4, 4, 0b11111111) is 0b11110000; - - set(0, 5, 0b11111111) is 0b00011111; - set(1, 5, 0b11111111) is 0b00111110; - set(2, 5, 0b11111111) is 0b01111100; - set(3, 5, 0b11111111) is 0b11111000; - - set(0, 6, 0b11111111) is 0b00111111; - set(1, 6, 0b11111111) is 0b01111110; - set(2, 6, 0b11111111) is 0b11111100; - - set(0, 7, 0b11111111) is 0b01111111; - set(1, 7, 0b11111111) is 0b11111110; - - set(0, 8, 0b11111111) is 0b11111111; - - // And, now let's cross byte boundaries. 
- - set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111; - set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110; - set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100; - set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000; - set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000; - set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000; - set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000; - set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000; - set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000; -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/dyngen.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/dyngen.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/dyngen.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/dyngen.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,178 +0,0 @@ -use crate::codegen; -use crate::ir::function::Abi; -use proc_macro2::Ident; - -/// Used to build the output tokens for dynamic bindings. -#[derive(Default)] -pub struct DynamicItems { - /// Tracks the tokens that will appears inside the library struct -- e.g.: - /// ```ignore - /// struct Lib { - /// __library: ::libloading::Library, - /// pub x: Result, // <- tracks these - /// ... - /// } - /// ``` - struct_members: Vec, - - /// Tracks the tokens that will appear inside the library struct's implementation, e.g.: - /// - /// ```ignore - /// impl Lib { - /// ... - /// pub unsafe fn foo(&self, ...) { // <- tracks these - /// ... - /// } - /// } - /// ``` - struct_implementation: Vec, - - /// Tracks the initialization of the fields inside the `::new` constructor of the library - /// struct, e.g.: - /// ```ignore - /// impl Lib { - /// - /// pub unsafe fn new

(path: P) -> Result - /// where - /// P: AsRef<::std::ffi::OsStr>, - /// { - /// ... - /// let foo = __library.get(...) ...; // <- tracks these - /// ... - /// } - /// - /// ... - /// } - /// ``` - constructor_inits: Vec, - - /// Tracks the information that is passed to the library struct at the end of the `::new` - /// constructor, e.g.: - /// ```ignore - /// impl LibFoo { - /// pub unsafe fn new

(path: P) -> Result - /// where - /// P: AsRef<::std::ffi::OsStr>, - /// { - /// ... - /// Ok(LibFoo { - /// __library: __library, - /// foo, - /// bar, // <- tracks these - /// ... - /// }) - /// } - /// } - /// ``` - init_fields: Vec, -} - -impl DynamicItems { - pub fn new() -> Self { - Self::default() - } - - pub fn get_tokens(&self, lib_ident: Ident) -> proc_macro2::TokenStream { - let struct_members = &self.struct_members; - let constructor_inits = &self.constructor_inits; - let init_fields = &self.init_fields; - let struct_implementation = &self.struct_implementation; - - quote! { - extern crate libloading; - - pub struct #lib_ident { - __library: ::libloading::Library, - #(#struct_members)* - } - - impl #lib_ident { - pub unsafe fn new

*/ - /// Baz = 0, - /// }; - /// ``` - /// - /// In that case, bindgen will generate a constant for `Bar` instead of - /// `Baz`. - constify_enum_variant: bool, - /// List of explicit derives for this type. - derives: Vec, -} - -fn parse_accessor(s: &str) -> FieldAccessorKind { - match s { - "false" => FieldAccessorKind::None, - "unsafe" => FieldAccessorKind::Unsafe, - "immutable" => FieldAccessorKind::Immutable, - _ => FieldAccessorKind::Regular, - } -} - -impl Annotations { - /// Construct new annotations for the given cursor and its bindgen comments - /// (if any). - pub fn new(cursor: &clang::Cursor) -> Option { - let mut anno = Annotations::default(); - let mut matched_one = false; - anno.parse(&cursor.comment(), &mut matched_one); - - if matched_one { - Some(anno) - } else { - None - } - } - - /// Should this type be hidden? - pub fn hide(&self) -> bool { - self.hide - } - - /// Should this type be opaque? - pub fn opaque(&self) -> bool { - self.opaque - } - - /// For a given type, indicates the type it should replace. - /// - /// For example, in the following code: - /// - /// ```cpp - /// - /// /**
*/ - /// struct Foo { int x; }; - /// - /// struct Bar { char foo; }; - /// ``` - /// - /// the generated code would look something like: - /// - /// ``` - /// /**
*/ - /// struct Bar { - /// x: ::std::os::raw::c_int, - /// }; - /// ``` - /// - /// That is, code for `Foo` is used to generate `Bar`. - pub fn use_instead_of(&self) -> Option<&[String]> { - self.use_instead_of.as_deref() - } - - /// The list of derives that have been specified in this annotation. - pub fn derives(&self) -> &[String] { - &self.derives - } - - /// Should we avoid implementing the `Copy` trait? - pub fn disallow_copy(&self) -> bool { - self.disallow_copy - } - - /// Should we avoid implementing the `Debug` trait? - pub fn disallow_debug(&self) -> bool { - self.disallow_debug - } - - /// Should we avoid implementing the `Default` trait? - pub fn disallow_default(&self) -> bool { - self.disallow_default - } - - /// Should this type get a `#[must_use]` annotation? - pub fn must_use_type(&self) -> bool { - self.must_use_type - } - - /// Should the fields be private? - pub fn private_fields(&self) -> Option { - self.private_fields - } - - /// What kind of accessors should we provide for this type's fields? 
- pub fn accessor_kind(&self) -> Option { - self.accessor_kind - } - - fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) { - use clang_sys::CXComment_HTMLStartTag; - if comment.kind() == CXComment_HTMLStartTag && - comment.get_tag_name() == "div" && - comment - .get_tag_attrs() - .next() - .map_or(false, |attr| attr.name == "rustbindgen") - { - *matched = true; - for attr in comment.get_tag_attrs() { - match attr.name.as_str() { - "opaque" => self.opaque = true, - "hide" => self.hide = true, - "nocopy" => self.disallow_copy = true, - "nodebug" => self.disallow_debug = true, - "nodefault" => self.disallow_default = true, - "mustusetype" => self.must_use_type = true, - "replaces" => { - self.use_instead_of = Some( - attr.value.split("::").map(Into::into).collect(), - ) - } - "derive" => self.derives.push(attr.value), - "private" => { - self.private_fields = Some(attr.value != "false") - } - "accessor" => { - self.accessor_kind = Some(parse_accessor(&attr.value)) - } - "constant" => self.constify_enum_variant = true, - _ => {} - } - } - } - - for child in comment.get_children() { - self.parse(&child, matched); - } - } - - /// Returns whether we've parsed a "constant" attribute. - pub fn constify_enum_variant(&self) -> bool { - self.constify_enum_variant - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comment.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comment.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comment.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comment.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -//! Utilities for manipulating C/C++ comments. - -/// The type of a comment. -#[derive(Debug, PartialEq, Eq)] -enum Kind { - /// A `///` comment, or something of the like. - /// All lines in a comment should start with the same symbol. 
- SingleLines, - /// A `/**` comment, where each other line can start with `*` and the - /// entire block ends with `*/`. - MultiLine, -} - -/// Preprocesses a C/C++ comment so that it is a valid Rust comment. -pub fn preprocess(comment: &str, indent: usize) -> String { - match self::kind(comment) { - Some(Kind::SingleLines) => preprocess_single_lines(comment, indent), - Some(Kind::MultiLine) => preprocess_multi_line(comment, indent), - None => comment.to_owned(), - } -} - -/// Gets the kind of the doc comment, if it is one. -fn kind(comment: &str) -> Option { - if comment.starts_with("/*") { - Some(Kind::MultiLine) - } else if comment.starts_with("//") { - Some(Kind::SingleLines) - } else { - None - } -} - -fn make_indent(indent: usize) -> String { - const RUST_INDENTATION: usize = 4; - " ".repeat(indent * RUST_INDENTATION) -} - -/// Preprocesses multiple single line comments. -/// -/// Handles lines starting with both `//` and `///`. -fn preprocess_single_lines(comment: &str, indent: usize) -> String { - debug_assert!(comment.starts_with("//"), "comment is not single line"); - - let indent = make_indent(indent); - let mut is_first = true; - let lines: Vec<_> = comment - .lines() - .map(|l| l.trim().trim_start_matches('/')) - .map(|l| { - let indent = if is_first { "" } else { &*indent }; - is_first = false; - format!("{}///{}", indent, l) - }) - .collect(); - lines.join("\n") -} - -fn preprocess_multi_line(comment: &str, indent: usize) -> String { - let comment = comment - .trim_start_matches('/') - .trim_end_matches('/') - .trim_end_matches('*'); - - let indent = make_indent(indent); - // Strip any potential `*` characters preceding each line. - let mut is_first = true; - let mut lines: Vec<_> = comment - .lines() - .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!')) - .skip_while(|line| line.trim().is_empty()) // Skip the first empty lines. 
- .map(|line| { - let indent = if is_first { "" } else { &*indent }; - is_first = false; - format!("{}///{}", indent, line) - }) - .collect(); - - // Remove the trailing line corresponding to the `*/`. - if lines - .last() - .map_or(false, |l| l.trim().is_empty() || l.trim() == "///") - { - lines.pop(); - } - - lines.join("\n") -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn picks_up_single_and_multi_line_doc_comments() { - assert_eq!(kind("/// hello"), Some(Kind::SingleLines)); - assert_eq!(kind("/** world */"), Some(Kind::MultiLine)); - } - - #[test] - fn processes_single_lines_correctly() { - assert_eq!(preprocess("/// hello", 0), "/// hello"); - assert_eq!(preprocess("// hello", 0), "/// hello"); - assert_eq!(preprocess("// hello", 0), "/// hello"); - } - - #[test] - fn processes_multi_lines_correctly() { - assert_eq!( - preprocess("/** hello \n * world \n * foo \n */", 0), - "/// hello\n/// world\n/// foo" - ); - - assert_eq!( - preprocess("/**\nhello\n*world\n*foo\n*/", 0), - "///hello\n///world\n///foo" - ); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comp.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comp.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comp.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/comp.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1854 +0,0 @@ -//! Compound types (unions and structs) in our intermediate representation. 
- -use super::analysis::Sizedness; -use super::annotations::Annotations; -use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId}; -use super::dot::DotAttributes; -use super::item::{IsOpaque, Item}; -use super::layout::Layout; -use super::template::TemplateParameters; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; -use crate::clang; -use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2}; -use crate::ir::derive::CanDeriveCopy; -use crate::parse::{ClangItemParser, ParseError}; -use crate::HashMap; -use peeking_take_while::PeekableExt; -use std::cmp; -use std::io; -use std::mem; - -/// The kind of compound type. -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum CompKind { - /// A struct. - Struct, - /// A union. - Union, -} - -/// The kind of C++ method. -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum MethodKind { - /// A constructor. We represent it as method for convenience, to avoid code - /// duplication. - Constructor, - /// A destructor. - Destructor, - /// A virtual destructor. - VirtualDestructor { - /// Whether it's pure virtual. - pure_virtual: bool, - }, - /// A static method. - Static, - /// A normal method. - Normal, - /// A virtual method. - Virtual { - /// Whether it's pure virtual. - pure_virtual: bool, - }, -} - -impl MethodKind { - /// Is this a destructor method? - pub fn is_destructor(&self) -> bool { - match *self { - MethodKind::Destructor | MethodKind::VirtualDestructor { .. } => { - true - } - _ => false, - } - } - - /// Is this a pure virtual method? - pub fn is_pure_virtual(&self) -> bool { - match *self { - MethodKind::Virtual { pure_virtual } | - MethodKind::VirtualDestructor { pure_virtual } => pure_virtual, - _ => false, - } - } -} - -/// A struct representing a C++ method, either static, normal, or virtual. -#[derive(Debug)] -pub struct Method { - kind: MethodKind, - /// The signature of the method. 
Take into account this is not a `Type` - /// item, but a `Function` one. - /// - /// This is tricky and probably this field should be renamed. - signature: FunctionId, - is_const: bool, -} - -impl Method { - /// Construct a new `Method`. - pub fn new( - kind: MethodKind, - signature: FunctionId, - is_const: bool, - ) -> Self { - Method { - kind, - signature, - is_const, - } - } - - /// What kind of method is this? - pub fn kind(&self) -> MethodKind { - self.kind - } - - /// Is this a constructor? - pub fn is_constructor(&self) -> bool { - self.kind == MethodKind::Constructor - } - - /// Is this a virtual method? - pub fn is_virtual(&self) -> bool { - matches!( - self.kind, - MethodKind::Virtual { .. } | MethodKind::VirtualDestructor { .. } - ) - } - - /// Is this a static method? - pub fn is_static(&self) -> bool { - self.kind == MethodKind::Static - } - - /// Get the id for the `Function` signature for this method. - pub fn signature(&self) -> FunctionId { - self.signature - } - - /// Is this a const qualified method? - pub fn is_const(&self) -> bool { - self.is_const - } -} - -/// Methods common to the various field types. -pub trait FieldMethods { - /// Get the name of this field. - fn name(&self) -> Option<&str>; - - /// Get the type of this field. - fn ty(&self) -> TypeId; - - /// Get the comment for this field. - fn comment(&self) -> Option<&str>; - - /// If this is a bitfield, how many bits does it need? - fn bitfield_width(&self) -> Option; - - /// Is this feild declared public? - fn is_public(&self) -> bool; - - /// Get the annotations for this field. - fn annotations(&self) -> &Annotations; - - /// The offset of the field (in bits) - fn offset(&self) -> Option; -} - -/// A contiguous set of logical bitfields that live within the same physical -/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section -/// 2.4.II.1 in the Itanium C++ -/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types). 
-#[derive(Debug)] -pub struct BitfieldUnit { - nth: usize, - layout: Layout, - bitfields: Vec, -} - -impl BitfieldUnit { - /// Get the 1-based index of this bitfield unit within its containing - /// struct. Useful for generating a Rust struct's field name for this unit - /// of bitfields. - pub fn nth(&self) -> usize { - self.nth - } - - /// Get the layout within which these bitfields reside. - pub fn layout(&self) -> Layout { - self.layout - } - - /// Get the bitfields within this unit. - pub fn bitfields(&self) -> &[Bitfield] { - &self.bitfields - } -} - -/// A struct representing a C++ field. -#[derive(Debug)] -pub enum Field { - /// A normal data member. - DataMember(FieldData), - - /// A physical allocation unit containing many logical bitfields. - Bitfields(BitfieldUnit), -} - -impl Field { - /// Get this field's layout. - pub fn layout(&self, ctx: &BindgenContext) -> Option { - match *self { - Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout), - Field::DataMember(ref data) => { - ctx.resolve_type(data.ty).layout(ctx) - } - } - } -} - -impl Trace for Field { - type Extra = (); - - fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - match *self { - Field::DataMember(ref data) => { - tracer.visit_kind(data.ty.into(), EdgeKind::Field); - } - Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => { - for bf in bitfields { - tracer.visit_kind(bf.ty().into(), EdgeKind::Field); - } - } - } - } -} - -impl DotAttributes for Field { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - match *self { - Field::DataMember(ref data) => data.dot_attributes(ctx, out), - Field::Bitfields(BitfieldUnit { - layout, - ref bitfields, - .. - }) => { - writeln!( - out, - r#" - bitfield unit - - - - - - - - - "#, - layout.size, layout.align - )?; - for bf in bitfields { - bf.dot_attributes(ctx, out)?; - } - writeln!(out, "
unit.size{}
unit.align{}
") - } - } - } -} - -impl DotAttributes for FieldData { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{}{:?}", - self.name().unwrap_or("(anonymous)"), - self.ty() - ) - } -} - -impl DotAttributes for Bitfield { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{} : {}{:?}", - self.name().unwrap_or("(anonymous)"), - self.width(), - self.ty() - ) - } -} - -/// A logical bitfield within some physical bitfield allocation unit. -#[derive(Debug)] -pub struct Bitfield { - /// Index of the bit within this bitfield's allocation unit where this - /// bitfield's bits begin. - offset_into_unit: usize, - - /// The field data for this bitfield. - data: FieldData, - - /// Name of the generated Rust getter for this bitfield. - /// - /// Should be assigned before codegen. - getter_name: Option, - - /// Name of the generated Rust setter for this bitfield. - /// - /// Should be assigned before codegen. - setter_name: Option, -} - -impl Bitfield { - /// Construct a new bitfield. - fn new(offset_into_unit: usize, raw: RawField) -> Bitfield { - assert!(raw.bitfield_width().is_some()); - - Bitfield { - offset_into_unit, - data: raw.0, - getter_name: None, - setter_name: None, - } - } - - /// Get the index of the bit within this bitfield's allocation unit where - /// this bitfield begins. - pub fn offset_into_unit(&self) -> usize { - self.offset_into_unit - } - - /// Get the mask value that when &'ed with this bitfield's allocation unit - /// produces this bitfield's value. - pub fn mask(&self) -> u64 { - use std::u64; - - let unoffseted_mask = - if self.width() as u64 == mem::size_of::() as u64 * 8 { - u64::MAX - } else { - (1u64 << self.width()) - 1u64 - }; - - unoffseted_mask << self.offset_into_unit() - } - - /// Get the bit width of this bitfield. 
- pub fn width(&self) -> u32 { - self.data.bitfield_width().unwrap() - } - - /// Name of the generated Rust getter for this bitfield. - /// - /// Panics if called before assigning bitfield accessor names or if - /// this bitfield have no name. - pub fn getter_name(&self) -> &str { - assert!( - self.name().is_some(), - "`Bitfield::getter_name` called on anonymous field" - ); - self.getter_name.as_ref().expect( - "`Bitfield::getter_name` should only be called after\ - assigning bitfield accessor names", - ) - } - - /// Name of the generated Rust setter for this bitfield. - /// - /// Panics if called before assigning bitfield accessor names or if - /// this bitfield have no name. - pub fn setter_name(&self) -> &str { - assert!( - self.name().is_some(), - "`Bitfield::setter_name` called on anonymous field" - ); - self.setter_name.as_ref().expect( - "`Bitfield::setter_name` should only be called\ - after assigning bitfield accessor names", - ) - } -} - -impl FieldMethods for Bitfield { - fn name(&self) -> Option<&str> { - self.data.name() - } - - fn ty(&self) -> TypeId { - self.data.ty() - } - - fn comment(&self) -> Option<&str> { - self.data.comment() - } - - fn bitfield_width(&self) -> Option { - self.data.bitfield_width() - } - - fn is_public(&self) -> bool { - self.data.is_public() - } - - fn annotations(&self) -> &Annotations { - self.data.annotations() - } - - fn offset(&self) -> Option { - self.data.offset() - } -} - -/// A raw field might be either of a plain data member or a bitfield within a -/// bitfield allocation unit, but we haven't processed it and determined which -/// yet (which would involve allocating it into a bitfield unit if it is a -/// bitfield). -#[derive(Debug)] -struct RawField(FieldData); - -impl RawField { - /// Construct a new `RawField`. 
- fn new( - name: Option, - ty: TypeId, - comment: Option, - annotations: Option, - bitfield_width: Option, - public: bool, - offset: Option, - ) -> RawField { - RawField(FieldData { - name, - ty, - comment, - annotations: annotations.unwrap_or_default(), - bitfield_width, - public, - offset, - }) - } -} - -impl FieldMethods for RawField { - fn name(&self) -> Option<&str> { - self.0.name() - } - - fn ty(&self) -> TypeId { - self.0.ty() - } - - fn comment(&self) -> Option<&str> { - self.0.comment() - } - - fn bitfield_width(&self) -> Option { - self.0.bitfield_width() - } - - fn is_public(&self) -> bool { - self.0.is_public() - } - - fn annotations(&self) -> &Annotations { - self.0.annotations() - } - - fn offset(&self) -> Option { - self.0.offset() - } -} - -/// Convert the given ordered set of raw fields into a list of either plain data -/// members, and/or bitfield units containing multiple bitfields. -/// -/// If we do not have the layout for a bitfield's type, then we can't reliably -/// compute its allocation unit. In such cases, we return an error. -fn raw_fields_to_fields_and_bitfield_units( - ctx: &BindgenContext, - raw_fields: I, - packed: bool, -) -> Result<(Vec, bool), ()> -where - I: IntoIterator, -{ - let mut raw_fields = raw_fields.into_iter().fuse().peekable(); - let mut fields = vec![]; - let mut bitfield_unit_count = 0; - - loop { - // While we have plain old data members, just keep adding them to our - // resulting fields. We introduce a scope here so that we can use - // `raw_fields` again after the `by_ref` iterator adaptor is dropped. - { - let non_bitfields = raw_fields - .by_ref() - .peeking_take_while(|f| f.bitfield_width().is_none()) - .map(|f| Field::DataMember(f.0)); - fields.extend(non_bitfields); - } - - // Now gather all the consecutive bitfields. Only consecutive bitfields - // may potentially share a bitfield allocation unit with each other in - // the Itanium C++ ABI. 
- let mut bitfields = raw_fields - .by_ref() - .peeking_take_while(|f| f.bitfield_width().is_some()) - .peekable(); - - if bitfields.peek().is_none() { - break; - } - - bitfields_to_allocation_units( - ctx, - &mut bitfield_unit_count, - &mut fields, - bitfields, - packed, - )?; - } - - assert!( - raw_fields.next().is_none(), - "The above loop should consume all items in `raw_fields`" - ); - - Ok((fields, bitfield_unit_count != 0)) -} - -/// Given a set of contiguous raw bitfields, group and allocate them into -/// (potentially multiple) bitfield units. -fn bitfields_to_allocation_units( - ctx: &BindgenContext, - bitfield_unit_count: &mut usize, - fields: &mut E, - raw_bitfields: I, - packed: bool, -) -> Result<(), ()> -where - E: Extend, - I: IntoIterator, -{ - assert!(ctx.collected_typerefs()); - - // NOTE: What follows is reverse-engineered from LLVM's - // lib/AST/RecordLayoutBuilder.cpp - // - // FIXME(emilio): There are some differences between Microsoft and the - // Itanium ABI, but we'll ignore those and stick to Itanium for now. - // - // Also, we need to handle packed bitfields and stuff. - // - // TODO(emilio): Take into account C++'s wide bitfields, and - // packing, sigh. - - fn flush_allocation_unit( - fields: &mut E, - bitfield_unit_count: &mut usize, - unit_size_in_bits: usize, - unit_align_in_bits: usize, - bitfields: Vec, - packed: bool, - ) where - E: Extend, - { - *bitfield_unit_count += 1; - let align = if packed { - 1 - } else { - bytes_from_bits_pow2(unit_align_in_bits) - }; - let size = align_to(unit_size_in_bits, 8) / 8; - let layout = Layout::new(size, align); - fields.extend(Some(Field::Bitfields(BitfieldUnit { - nth: *bitfield_unit_count, - layout, - bitfields, - }))); - } - - let mut max_align = 0; - let mut unfilled_bits_in_unit = 0; - let mut unit_size_in_bits = 0; - let mut unit_align = 0; - let mut bitfields_in_unit = vec![]; - - // TODO(emilio): Determine this from attributes or pragma ms_struct - // directives. 
Also, perhaps we should check if the target is MSVC? - const is_ms_struct: bool = false; - - for bitfield in raw_bitfields { - let bitfield_width = bitfield.bitfield_width().unwrap() as usize; - let bitfield_layout = - ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?; - let bitfield_size = bitfield_layout.size; - let bitfield_align = bitfield_layout.align; - - let mut offset = unit_size_in_bits; - if !packed { - if is_ms_struct { - if unit_size_in_bits != 0 && - (bitfield_width == 0 || - bitfield_width > unfilled_bits_in_unit) - { - // We've reached the end of this allocation unit, so flush it - // and its bitfields. - unit_size_in_bits = - align_to(unit_size_in_bits, unit_align * 8); - flush_allocation_unit( - fields, - bitfield_unit_count, - unit_size_in_bits, - unit_align, - mem::take(&mut bitfields_in_unit), - packed, - ); - - // Now we're working on a fresh bitfield allocation unit, so reset - // the current unit size and alignment. - offset = 0; - unit_align = 0; - } - } else if offset != 0 && - (bitfield_width == 0 || - (offset & (bitfield_align * 8 - 1)) + bitfield_width > - bitfield_size * 8) - { - offset = align_to(offset, bitfield_align * 8); - } - } - - // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not - // affect the alignment of a structure or union". This makes sense: such - // bit-fields are only used for padding, and we can't perform an - // un-aligned read of something we can't read because we can't even name - // it. - if bitfield.name().is_some() { - max_align = cmp::max(max_align, bitfield_align); - - // NB: The `bitfield_width` here is completely, absolutely - // intentional. Alignment of the allocation unit is based on the - // maximum bitfield width, not (directly) on the bitfields' types' - // alignment. - unit_align = cmp::max(unit_align, bitfield_width); - } - - // Always keep all bitfields around. 
While unnamed bitifields are used - // for padding (and usually not needed hereafter), large unnamed - // bitfields over their types size cause weird allocation size behavior from clang. - // Therefore, all bitfields needed to be kept around in order to check for this - // and make the struct opaque in this case - bitfields_in_unit.push(Bitfield::new(offset, bitfield)); - - unit_size_in_bits = offset + bitfield_width; - - // Compute what the physical unit's final size would be given what we - // have seen so far, and use that to compute how many bits are still - // available in the unit. - let data_size = align_to(unit_size_in_bits, bitfield_align * 8); - unfilled_bits_in_unit = data_size - unit_size_in_bits; - } - - if unit_size_in_bits != 0 { - // Flush the last allocation unit and its bitfields. - flush_allocation_unit( - fields, - bitfield_unit_count, - unit_size_in_bits, - unit_align, - bitfields_in_unit, - packed, - ); - } - - Ok(()) -} - -/// A compound structure's fields are initially raw, and have bitfields that -/// have not been grouped into allocation units. During this time, the fields -/// are mutable and we build them up during parsing. -/// -/// Then, once resolving typerefs is completed, we compute all structs' fields' -/// bitfield allocation units, and they remain frozen and immutable forever -/// after. 
-#[derive(Debug)] -enum CompFields { - Before(Vec), - After { - fields: Vec, - has_bitfield_units: bool, - }, - Error, -} - -impl Default for CompFields { - fn default() -> CompFields { - CompFields::Before(vec![]) - } -} - -impl CompFields { - fn append_raw_field(&mut self, raw: RawField) { - match *self { - CompFields::Before(ref mut raws) => { - raws.push(raw); - } - _ => { - panic!( - "Must not append new fields after computing bitfield allocation units" - ); - } - } - } - - fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) { - let raws = match *self { - CompFields::Before(ref mut raws) => mem::take(raws), - _ => { - panic!("Already computed bitfield units"); - } - }; - - let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed); - - match result { - Ok((fields, has_bitfield_units)) => { - *self = CompFields::After { - fields, - has_bitfield_units, - }; - } - Err(()) => { - *self = CompFields::Error; - } - } - } - - fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) { - let fields = match *self { - CompFields::After { ref mut fields, .. } => fields, - // Nothing to do here. 
- CompFields::Error => return, - CompFields::Before(_) => { - panic!("Not yet computed bitfield units."); - } - }; - - fn has_method( - methods: &[Method], - ctx: &BindgenContext, - name: &str, - ) -> bool { - methods.iter().any(|method| { - let method_name = ctx.resolve_func(method.signature()).name(); - method_name == name || ctx.rust_mangle(method_name) == name - }) - } - - struct AccessorNamesPair { - getter: String, - setter: String, - } - - let mut accessor_names: HashMap = fields - .iter() - .flat_map(|field| match *field { - Field::Bitfields(ref bu) => &*bu.bitfields, - Field::DataMember(_) => &[], - }) - .filter_map(|bitfield| bitfield.name()) - .map(|bitfield_name| { - let bitfield_name = bitfield_name.to_string(); - let getter = { - let mut getter = - ctx.rust_mangle(&bitfield_name).to_string(); - if has_method(methods, ctx, &getter) { - getter.push_str("_bindgen_bitfield"); - } - getter - }; - let setter = { - let setter = format!("set_{}", bitfield_name); - let mut setter = ctx.rust_mangle(&setter).to_string(); - if has_method(methods, ctx, &setter) { - setter.push_str("_bindgen_bitfield"); - } - setter - }; - (bitfield_name, AccessorNamesPair { getter, setter }) - }) - .collect(); - - let mut anon_field_counter = 0; - for field in fields.iter_mut() { - match *field { - Field::DataMember(FieldData { ref mut name, .. 
}) => { - if name.is_some() { - continue; - } - - anon_field_counter += 1; - *name = Some(format!( - "{}{}", - ctx.options().anon_fields_prefix, - anon_field_counter - )); - } - Field::Bitfields(ref mut bu) => { - for bitfield in &mut bu.bitfields { - if bitfield.name().is_none() { - continue; - } - - if let Some(AccessorNamesPair { getter, setter }) = - accessor_names.remove(bitfield.name().unwrap()) - { - bitfield.getter_name = Some(getter); - bitfield.setter_name = Some(setter); - } - } - } - } - } - } -} - -impl Trace for CompFields { - type Extra = (); - - fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - match *self { - CompFields::Error => {} - CompFields::Before(ref fields) => { - for f in fields { - tracer.visit_kind(f.ty().into(), EdgeKind::Field); - } - } - CompFields::After { ref fields, .. } => { - for f in fields { - f.trace(context, tracer, &()); - } - } - } - } -} - -/// Common data shared across different field types. -#[derive(Clone, Debug)] -pub struct FieldData { - /// The name of the field, empty if it's an unnamed bitfield width. - name: Option, - - /// The inner type. - ty: TypeId, - - /// The doc comment on the field if any. - comment: Option, - - /// Annotations for this field, or the default. - annotations: Annotations, - - /// If this field is a bitfield, and how many bits does it contain if it is. 
- bitfield_width: Option, - - /// If the C++ field is declared `public` - public: bool, - - /// The offset of the field (in bits) - offset: Option, -} - -impl FieldMethods for FieldData { - fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - fn ty(&self) -> TypeId { - self.ty - } - - fn comment(&self) -> Option<&str> { - self.comment.as_deref() - } - - fn bitfield_width(&self) -> Option { - self.bitfield_width - } - - fn is_public(&self) -> bool { - self.public - } - - fn annotations(&self) -> &Annotations { - &self.annotations - } - - fn offset(&self) -> Option { - self.offset - } -} - -/// The kind of inheritance a base class is using. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum BaseKind { - /// Normal inheritance, like: - /// - /// ```cpp - /// class A : public B {}; - /// ``` - Normal, - /// Virtual inheritance, like: - /// - /// ```cpp - /// class A: public virtual B {}; - /// ``` - Virtual, -} - -/// A base class. -#[derive(Clone, Debug)] -pub struct Base { - /// The type of this base class. - pub ty: TypeId, - /// The kind of inheritance we're doing. - pub kind: BaseKind, - /// Name of the field in which this base should be stored. - pub field_name: String, - /// Whether this base is inherited from publically. - pub is_pub: bool, -} - -impl Base { - /// Whether this base class is inheriting virtually. - pub fn is_virtual(&self) -> bool { - self.kind == BaseKind::Virtual - } - - /// Whether this base class should have it's own field for storage. - pub fn requires_storage(&self, ctx: &BindgenContext) -> bool { - // Virtual bases are already taken into account by the vtable - // pointer. - // - // FIXME(emilio): Is this always right? - if self.is_virtual() { - return false; - } - - // NB: We won't include zero-sized types in our base chain because they - // would contribute to our size given the dummy field we insert for - // zero-sized types. 
- if self.ty.is_zero_sized(ctx) { - return false; - } - - true - } - - /// Whether this base is inherited from publically. - pub fn is_public(&self) -> bool { - self.is_pub - } -} - -/// A compound type. -/// -/// Either a struct or union, a compound type is built up from the combination -/// of fields which also are associated with their own (potentially compound) -/// type. -#[derive(Debug)] -pub struct CompInfo { - /// Whether this is a struct or a union. - kind: CompKind, - - /// The members of this struct or union. - fields: CompFields, - - /// The abstract template parameters of this class. Note that these are NOT - /// concrete template arguments, and should always be a - /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see - /// `TypeKind::TemplateInstantiation`. - template_params: Vec, - - /// The method declarations inside this class, if in C++ mode. - methods: Vec, - - /// The different constructors this struct or class contains. - constructors: Vec, - - /// The destructor of this type. The bool represents whether this destructor - /// is virtual. - destructor: Option<(MethodKind, FunctionId)>, - - /// Vector of classes this one inherits from. - base_members: Vec, - - /// The inner types that were declared inside this class, in something like: - /// - /// class Foo { - /// typedef int FooTy; - /// struct Bar { - /// int baz; - /// }; - /// } - /// - /// static Foo::Bar const = {3}; - inner_types: Vec, - - /// Set of static constants declared inside this class. - inner_vars: Vec, - - /// Whether this type should generate an vtable (TODO: Should be able to - /// look at the virtual methods and ditch this field). - has_own_virtual_method: bool, - - /// Whether this type has destructor. - has_destructor: bool, - - /// Whether this type has a base type with more than one member. - /// - /// TODO: We should be able to compute this. 
- has_nonempty_base: bool, - - /// If this type has a template parameter which is not a type (e.g.: a - /// size_t) - has_non_type_template_params: bool, - - /// Whether we saw `__attribute__((packed))` on or within this type. - packed_attr: bool, - - /// Used to know if we've found an opaque attribute that could cause us to - /// generate a type with invalid layout. This is explicitly used to avoid us - /// generating bad alignments when parsing types like max_align_t. - /// - /// It's not clear what the behavior should be here, if generating the item - /// and pray, or behave as an opaque type. - found_unknown_attr: bool, - - /// Used to indicate when a struct has been forward declared. Usually used - /// in headers so that APIs can't modify them directly. - is_forward_declaration: bool, -} - -impl CompInfo { - /// Construct a new compound type. - pub fn new(kind: CompKind) -> Self { - CompInfo { - kind, - fields: CompFields::default(), - template_params: vec![], - methods: vec![], - constructors: vec![], - destructor: None, - base_members: vec![], - inner_types: vec![], - inner_vars: vec![], - has_own_virtual_method: false, - has_destructor: false, - has_nonempty_base: false, - has_non_type_template_params: false, - packed_attr: false, - found_unknown_attr: false, - is_forward_declaration: false, - } - } - - /// Compute the layout of this type. - /// - /// This is called as a fallback under some circumstances where LLVM doesn't - /// give us the correct layout. - /// - /// If we're a union without known layout, we try to compute it from our - /// members. This is not ideal, but clang fails to report the size for these - /// kind of unions, see test/headers/template_union.hpp - pub fn layout(&self, ctx: &BindgenContext) -> Option { - // We can't do better than clang here, sorry. - if self.kind == CompKind::Struct { - return None; - } - - // By definition, we don't have the right layout information here if - // we're a forward declaration. 
- if self.is_forward_declaration() { - return None; - } - - // empty union case - if !self.has_fields() { - return None; - } - - let mut max_size = 0; - // Don't allow align(0) - let mut max_align = 1; - self.each_known_field_layout(ctx, |layout| { - max_size = cmp::max(max_size, layout.size); - max_align = cmp::max(max_align, layout.align); - }); - - Some(Layout::new(max_size, max_align)) - } - - /// Get this type's set of fields. - pub fn fields(&self) -> &[Field] { - match self.fields { - CompFields::Error => &[], - CompFields::After { ref fields, .. } => fields, - CompFields::Before(..) => { - panic!("Should always have computed bitfield units first"); - } - } - } - - fn has_fields(&self) -> bool { - match self.fields { - CompFields::Error => false, - CompFields::After { ref fields, .. } => !fields.is_empty(), - CompFields::Before(ref raw_fields) => !raw_fields.is_empty(), - } - } - - fn each_known_field_layout( - &self, - ctx: &BindgenContext, - mut callback: impl FnMut(Layout), - ) { - match self.fields { - CompFields::Error => {} - CompFields::After { ref fields, .. } => { - for field in fields.iter() { - if let Some(layout) = field.layout(ctx) { - callback(layout); - } - } - } - CompFields::Before(ref raw_fields) => { - for field in raw_fields.iter() { - let field_ty = ctx.resolve_type(field.0.ty); - if let Some(layout) = field_ty.layout(ctx) { - callback(layout); - } - } - } - } - } - - fn has_bitfields(&self) -> bool { - match self.fields { - CompFields::Error => false, - CompFields::After { - has_bitfield_units, .. - } => has_bitfield_units, - CompFields::Before(_) => { - panic!("Should always have computed bitfield units first"); - } - } - } - - /// Returns whether we have a too large bitfield unit, in which case we may - /// not be able to derive some of the things we should be able to normally - /// derive. 
- pub fn has_too_large_bitfield_unit(&self) -> bool { - if !self.has_bitfields() { - return false; - } - self.fields().iter().any(|field| match *field { - Field::DataMember(..) => false, - Field::Bitfields(ref unit) => { - unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT - } - }) - } - - /// Does this type have any template parameters that aren't types - /// (e.g. int)? - pub fn has_non_type_template_params(&self) -> bool { - self.has_non_type_template_params - } - - /// Do we see a virtual function during parsing? - /// Get the has_own_virtual_method boolean. - pub fn has_own_virtual_method(&self) -> bool { - self.has_own_virtual_method - } - - /// Did we see a destructor when parsing this type? - pub fn has_own_destructor(&self) -> bool { - self.has_destructor - } - - /// Get this type's set of methods. - pub fn methods(&self) -> &[Method] { - &self.methods - } - - /// Get this type's set of constructors. - pub fn constructors(&self) -> &[FunctionId] { - &self.constructors - } - - /// Get this type's destructor. - pub fn destructor(&self) -> Option<(MethodKind, FunctionId)> { - self.destructor - } - - /// What kind of compound type is this? - pub fn kind(&self) -> CompKind { - self.kind - } - - /// Is this a union? - pub fn is_union(&self) -> bool { - self.kind() == CompKind::Union - } - - /// The set of types that this one inherits from. - pub fn base_members(&self) -> &[Base] { - &self.base_members - } - - /// Construct a new compound type from a Clang type. 
- pub fn from_ty( - potential_id: ItemId, - ty: &clang::Type, - location: Option, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - assert!( - ty.template_args().is_none(), - "We handle template instantiations elsewhere" - ); - - let mut cursor = ty.declaration(); - let mut kind = Self::kind_from_cursor(&cursor); - if kind.is_err() { - if let Some(location) = location { - kind = Self::kind_from_cursor(&location); - cursor = location; - } - } - - let kind = kind?; - - debug!("CompInfo::from_ty({:?}, {:?})", kind, cursor); - - let mut ci = CompInfo::new(kind); - ci.is_forward_declaration = - location.map_or(true, |cur| match cur.kind() { - CXCursor_ParmDecl => true, - CXCursor_StructDecl | CXCursor_UnionDecl | - CXCursor_ClassDecl => !cur.is_definition(), - _ => false, - }); - - let mut maybe_anonymous_struct_field = None; - cursor.visit(|cur| { - if cur.kind() != CXCursor_FieldDecl { - if let Some((ty, clang_ty, public, offset)) = - maybe_anonymous_struct_field.take() - { - if cur.kind() == CXCursor_TypedefDecl && - cur.typedef_type().unwrap().canonical_type() == - clang_ty - { - // Typedefs of anonymous structs appear later in the ast - // than the struct itself, that would otherwise be an - // anonymous field. Detect that case here, and do - // nothing. 
- } else { - let field = RawField::new( - None, ty, None, None, None, public, offset, - ); - ci.fields.append_raw_field(field); - } - } - } - - match cur.kind() { - CXCursor_FieldDecl => { - if let Some((ty, clang_ty, public, offset)) = - maybe_anonymous_struct_field.take() - { - let mut used = false; - cur.visit(|child| { - if child.cur_type() == clang_ty { - used = true; - } - CXChildVisit_Continue - }); - - if !used { - let field = RawField::new( - None, ty, None, None, None, public, offset, - ); - ci.fields.append_raw_field(field); - } - } - - let bit_width = cur.bit_width(); - let field_type = Item::from_ty_or_ref( - cur.cur_type(), - cur, - Some(potential_id), - ctx, - ); - - let comment = cur.raw_comment(); - let annotations = Annotations::new(&cur); - let name = cur.spelling(); - let is_public = cur.public_accessible(); - let offset = cur.offset_of_field().ok(); - - // Name can be empty if there are bitfields, for example, - // see tests/headers/struct_with_bitfields.h - assert!( - !name.is_empty() || bit_width.is_some(), - "Empty field name?" - ); - - let name = if name.is_empty() { None } else { Some(name) }; - - let field = RawField::new( - name, - field_type, - comment, - annotations, - bit_width, - is_public, - offset, - ); - ci.fields.append_raw_field(field); - - // No we look for things like attributes and stuff. - cur.visit(|cur| { - if cur.kind() == CXCursor_UnexposedAttr { - ci.found_unknown_attr = true; - } - CXChildVisit_Continue - }); - } - CXCursor_UnexposedAttr => { - ci.found_unknown_attr = true; - } - CXCursor_EnumDecl | - CXCursor_TypeAliasDecl | - CXCursor_TypeAliasTemplateDecl | - CXCursor_TypedefDecl | - CXCursor_StructDecl | - CXCursor_UnionDecl | - CXCursor_ClassTemplate | - CXCursor_ClassDecl => { - // We can find non-semantic children here, clang uses a - // StructDecl to note incomplete structs that haven't been - // forward-declared before, see [1]. 
- // - // Also, clang seems to scope struct definitions inside - // unions, and other named struct definitions inside other - // structs to the whole translation unit. - // - // Let's just assume that if the cursor we've found is a - // definition, it's a valid inner type. - // - // [1]: https://github.com/rust-lang/rust-bindgen/issues/482 - let is_inner_struct = - cur.semantic_parent() == cursor || cur.is_definition(); - if !is_inner_struct { - return CXChildVisit_Continue; - } - - // Even if this is a definition, we may not be the semantic - // parent, see #1281. - let inner = Item::parse(cur, Some(potential_id), ctx) - .expect("Inner ClassDecl"); - - // If we avoided recursion parsing this type (in - // `Item::from_ty_with_id()`), then this might not be a - // valid type ID, so check and gracefully handle this. - if ctx.resolve_item_fallible(inner).is_some() { - let inner = inner.expect_type_id(ctx); - - ci.inner_types.push(inner); - - // A declaration of an union or a struct without name - // could also be an unnamed field, unfortunately. 
- if cur.spelling().is_empty() && - cur.kind() != CXCursor_EnumDecl - { - let ty = cur.cur_type(); - let public = cur.public_accessible(); - let offset = cur.offset_of_field().ok(); - - maybe_anonymous_struct_field = - Some((inner, ty, public, offset)); - } - } - } - CXCursor_PackedAttr => { - ci.packed_attr = true; - } - CXCursor_TemplateTypeParameter => { - let param = Item::type_param(None, cur, ctx).expect( - "Item::type_param should't fail when pointing \ - at a TemplateTypeParameter", - ); - ci.template_params.push(param); - } - CXCursor_CXXBaseSpecifier => { - let is_virtual_base = cur.is_virtual_base(); - ci.has_own_virtual_method |= is_virtual_base; - - let kind = if is_virtual_base { - BaseKind::Virtual - } else { - BaseKind::Normal - }; - - let field_name = match ci.base_members.len() { - 0 => "_base".into(), - n => format!("_base_{}", n), - }; - let type_id = - Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx); - ci.base_members.push(Base { - ty: type_id, - kind, - field_name, - is_pub: cur.access_specifier() == - clang_sys::CX_CXXPublic, - }); - } - CXCursor_Constructor | CXCursor_Destructor | - CXCursor_CXXMethod => { - let is_virtual = cur.method_is_virtual(); - let is_static = cur.method_is_static(); - debug_assert!(!(is_static && is_virtual), "How?"); - - ci.has_destructor |= cur.kind() == CXCursor_Destructor; - ci.has_own_virtual_method |= is_virtual; - - // This used to not be here, but then I tried generating - // stylo bindings with this (without path filters), and - // cried a lot with a method in gfx/Point.h - // (ToUnknownPoint), that somehow was causing the same type - // to be inserted in the map two times. - // - // I couldn't make a reduced test case, but anyway... - // Methods of template functions not only used to be inlined, - // but also instantiated, and we wouldn't be able to call - // them, so just bail out. 
- if !ci.template_params.is_empty() { - return CXChildVisit_Continue; - } - - // NB: This gets us an owned `Function`, not a - // `FunctionSig`. - let signature = - match Item::parse(cur, Some(potential_id), ctx) { - Ok(item) - if ctx - .resolve_item(item) - .kind() - .is_function() => - { - item - } - _ => return CXChildVisit_Continue, - }; - - let signature = signature.expect_function_id(ctx); - - match cur.kind() { - CXCursor_Constructor => { - ci.constructors.push(signature); - } - CXCursor_Destructor => { - let kind = if is_virtual { - MethodKind::VirtualDestructor { - pure_virtual: cur.method_is_pure_virtual(), - } - } else { - MethodKind::Destructor - }; - ci.destructor = Some((kind, signature)); - } - CXCursor_CXXMethod => { - let is_const = cur.method_is_const(); - let method_kind = if is_static { - MethodKind::Static - } else if is_virtual { - MethodKind::Virtual { - pure_virtual: cur.method_is_pure_virtual(), - } - } else { - MethodKind::Normal - }; - - let method = - Method::new(method_kind, signature, is_const); - - ci.methods.push(method); - } - _ => unreachable!("How can we see this here?"), - } - } - CXCursor_NonTypeTemplateParameter => { - ci.has_non_type_template_params = true; - } - CXCursor_VarDecl => { - let linkage = cur.linkage(); - if linkage != CXLinkage_External && - linkage != CXLinkage_UniqueExternal - { - return CXChildVisit_Continue; - } - - let visibility = cur.visibility(); - if visibility != CXVisibility_Default { - return CXChildVisit_Continue; - } - - if let Ok(item) = Item::parse(cur, Some(potential_id), ctx) - { - ci.inner_vars.push(item.as_var_id_unchecked()); - } - } - // Intentionally not handled - CXCursor_CXXAccessSpecifier | - CXCursor_CXXFinalAttr | - CXCursor_FunctionTemplate | - CXCursor_ConversionFunction => {} - _ => { - warn!( - "unhandled comp member `{}` (kind {:?}) in `{}` ({})", - cur.spelling(), - clang::kind_to_str(cur.kind()), - cursor.spelling(), - cur.location() - ); - } - } - CXChildVisit_Continue - }); - - 
if let Some((ty, _, public, offset)) = maybe_anonymous_struct_field { - let field = - RawField::new(None, ty, None, None, None, public, offset); - ci.fields.append_raw_field(field); - } - - Ok(ci) - } - - fn kind_from_cursor( - cursor: &clang::Cursor, - ) -> Result { - use clang_sys::*; - Ok(match cursor.kind() { - CXCursor_UnionDecl => CompKind::Union, - CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct, - CXCursor_CXXBaseSpecifier | - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_ClassTemplate => match cursor.template_kind() { - CXCursor_UnionDecl => CompKind::Union, - _ => CompKind::Struct, - }, - _ => { - warn!("Unknown kind for comp type: {:?}", cursor); - return Err(ParseError::Continue); - } - }) - } - - /// Get the set of types that were declared within this compound type - /// (e.g. nested class definitions). - pub fn inner_types(&self) -> &[TypeId] { - &self.inner_types - } - - /// Get the set of static variables declared within this compound type. - pub fn inner_vars(&self) -> &[VarId] { - &self.inner_vars - } - - /// Have we found a field with an opaque type that could potentially mess up - /// the layout of this compound type? - pub fn found_unknown_attr(&self) -> bool { - self.found_unknown_attr - } - - /// Is this compound type packed? - pub fn is_packed( - &self, - ctx: &BindgenContext, - layout: Option<&Layout>, - ) -> bool { - if self.packed_attr { - return true; - } - - // Even though `libclang` doesn't expose `#pragma packed(...)`, we can - // detect it through its effects. 
- if let Some(parent_layout) = layout { - let mut packed = false; - self.each_known_field_layout(ctx, |layout| { - packed = packed || layout.align > parent_layout.align; - }); - if packed { - info!("Found a struct that was defined within `#pragma packed(...)`"); - return true; - } - - if self.has_own_virtual_method && parent_layout.align == 1 { - return true; - } - } - - false - } - - /// Returns true if compound type has been forward declared - pub fn is_forward_declaration(&self) -> bool { - self.is_forward_declaration - } - - /// Compute this compound structure's bitfield allocation units. - pub fn compute_bitfield_units( - &mut self, - ctx: &BindgenContext, - layout: Option<&Layout>, - ) { - let packed = self.is_packed(ctx, layout); - self.fields.compute_bitfield_units(ctx, packed) - } - - /// Assign for each anonymous field a generated name. - pub fn deanonymize_fields(&mut self, ctx: &BindgenContext) { - self.fields.deanonymize_fields(ctx, &self.methods); - } - - /// Returns whether the current union can be represented as a Rust `union` - /// - /// Requirements: - /// 1. Current RustTarget allows for `untagged_union` - /// 2. Each field can derive `Copy` - /// 3. It's not zero-sized. 
- pub fn can_be_rust_union( - &self, - ctx: &BindgenContext, - layout: Option<&Layout>, - ) -> bool { - if !ctx.options().rust_features().untagged_union { - return false; - } - - if self.is_forward_declaration() { - return false; - } - - let all_can_copy = self.fields().iter().all(|f| match *f { - Field::DataMember(ref field_data) => { - field_data.ty().can_derive_copy(ctx) - } - Field::Bitfields(_) => true, - }); - - if !all_can_copy { - return false; - } - - if layout.map_or(false, |l| l.size == 0) { - return false; - } - - true - } -} - -impl DotAttributes for CompInfo { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "CompKind{:?}", self.kind)?; - - if self.has_own_virtual_method { - writeln!(out, "has_vtabletrue")?; - } - - if self.has_destructor { - writeln!(out, "has_destructortrue")?; - } - - if self.has_nonempty_base { - writeln!(out, "has_nonempty_basetrue")?; - } - - if self.has_non_type_template_params { - writeln!( - out, - "has_non_type_template_paramstrue" - )?; - } - - if self.packed_attr { - writeln!(out, "packed_attrtrue")?; - } - - if self.is_forward_declaration { - writeln!( - out, - "is_forward_declarationtrue" - )?; - } - - if !self.fields().is_empty() { - writeln!(out, r#"fields"#)?; - for field in self.fields() { - field.dot_attributes(ctx, out)?; - } - writeln!(out, "
")?; - } - - Ok(()) - } -} - -impl IsOpaque for CompInfo { - type Extra = Option; - - fn is_opaque(&self, ctx: &BindgenContext, layout: &Option) -> bool { - if self.has_non_type_template_params { - return true; - } - - // When we do not have the layout for a bitfield's type (for example, it - // is a type parameter), then we can't compute bitfield units. We are - // left with no choice but to make the whole struct opaque, or else we - // might generate structs with incorrect sizes and alignments. - if let CompFields::Error = self.fields { - return true; - } - - // Bitfields with a width that is larger than their unit's width have - // some strange things going on, and the best we can do is make the - // whole struct opaque. - if self.fields().iter().any(|f| match *f { - Field::DataMember(_) => false, - Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| { - let bitfield_layout = ctx - .resolve_type(bf.ty()) - .layout(ctx) - .expect("Bitfield without layout? Gah!"); - bf.width() / 8 > bitfield_layout.size as u32 - }), - }) { - return true; - } - - if !ctx.options().rust_features().repr_packed_n { - // If we don't have `#[repr(packed(N)]`, the best we can - // do is make this struct opaque. - // - // See https://github.com/rust-lang/rust-bindgen/issues/537 and - // https://github.com/rust-lang/rust/issues/33158 - if self.is_packed(ctx, layout.as_ref()) && - layout.map_or(false, |l| l.align > 1) - { - warn!("Found a type that is both packed and aligned to greater than \ - 1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \ - are treating it as opaque. 
You may wish to set bindgen's rust target \ - version to 1.33 or later to enable `#[repr(packed(N))]` support."); - return true; - } - } - - false - } -} - -impl TemplateParameters for CompInfo { - fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { - self.template_params.clone() - } -} - -impl Trace for CompInfo { - type Extra = Item; - - fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) - where - T: Tracer, - { - for p in item.all_template_params(context) { - tracer.visit_kind(p.into(), EdgeKind::TemplateParameterDefinition); - } - - for ty in self.inner_types() { - tracer.visit_kind(ty.into(), EdgeKind::InnerType); - } - - for &var in self.inner_vars() { - tracer.visit_kind(var.into(), EdgeKind::InnerVar); - } - - for method in self.methods() { - tracer.visit_kind(method.signature.into(), EdgeKind::Method); - } - - if let Some((_kind, signature)) = self.destructor() { - tracer.visit_kind(signature.into(), EdgeKind::Destructor); - } - - for ctor in self.constructors() { - tracer.visit_kind(ctor.into(), EdgeKind::Constructor); - } - - // Base members and fields are not generated for opaque types (but all - // of the above things are) so stop here. - if item.is_opaque(context, &()) { - return; - } - - for base in self.base_members() { - tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember); - } - - self.fields.trace(context, tracer, &()); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/context.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/context.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/context.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/context.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2835 +0,0 @@ -//! Common context that is passed around during parsing and codegen. 
- -use super::super::time::Timer; -use super::analysis::{ - analyze, as_cannot_derive_set, CannotDerive, DeriveTrait, - HasDestructorAnalysis, HasFloat, HasTypeParameterInArray, - HasVtableAnalysis, HasVtableResult, SizednessAnalysis, SizednessResult, - UsedTemplateParameters, -}; -use super::derive::{ - CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use super::function::Function; -use super::int::IntKind; -use super::item::{IsOpaque, Item, ItemAncestors, ItemSet}; -use super::item_kind::ItemKind; -use super::module::{Module, ModuleKind}; -use super::template::{TemplateInstantiation, TemplateParameters}; -use super::traversal::{self, Edge, ItemTraversal}; -use super::ty::{FloatKind, Type, TypeKind}; -use crate::callbacks::ParseCallbacks; -use crate::clang::{self, Cursor}; -use crate::parse::ClangItemParser; -use crate::BindgenOptions; -use crate::{Entry, HashMap, HashSet}; -use cexpr; -use clang_sys; -use proc_macro2::{Ident, Span}; -use std::borrow::Cow; -use std::cell::{Cell, RefCell}; -use std::collections::{BTreeSet, HashMap as StdHashMap}; -use std::iter::IntoIterator; -use std::mem; - -/// An identifier for some kind of IR item. -#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] -pub struct ItemId(usize); - -macro_rules! item_id_newtype { - ( - $( #[$attr:meta] )* - pub struct $name:ident(ItemId) - where - $( #[$checked_attr:meta] )* - checked = $checked:ident with $check_method:ident, - $( #[$expected_attr:meta] )* - expected = $expected:ident, - $( #[$unchecked_attr:meta] )* - unchecked = $unchecked:ident; - ) => { - $( #[$attr] )* - #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] - pub struct $name(ItemId); - - impl $name { - /// Create an `ItemResolver` from this id. 
- pub fn into_resolver(self) -> ItemResolver { - let id: ItemId = self.into(); - id.into() - } - } - - impl ::std::cmp::PartialEq for $name - where - T: Copy + Into - { - fn eq(&self, rhs: &T) -> bool { - let rhs: ItemId = (*rhs).into(); - self.0 == rhs - } - } - - impl From<$name> for ItemId { - fn from(id: $name) -> ItemId { - id.0 - } - } - - impl<'a> From<&'a $name> for ItemId { - fn from(id: &'a $name) -> ItemId { - id.0 - } - } - - impl ItemId { - $( #[$checked_attr] )* - pub fn $checked(&self, ctx: &BindgenContext) -> Option<$name> { - if ctx.resolve_item(*self).kind().$check_method() { - Some($name(*self)) - } else { - None - } - } - - $( #[$expected_attr] )* - pub fn $expected(&self, ctx: &BindgenContext) -> $name { - self.$checked(ctx) - .expect(concat!( - stringify!($expected), - " called with ItemId that points to the wrong ItemKind" - )) - } - - $( #[$unchecked_attr] )* - pub fn $unchecked(&self) -> $name { - $name(*self) - } - } - } -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Type`. - pub struct TypeId(ItemId) - where - /// Convert this `ItemId` into a `TypeId` if its associated item is a type, - /// otherwise return `None`. - checked = as_type_id with is_type, - - /// Convert this `ItemId` into a `TypeId`. - /// - /// If this `ItemId` does not point to a type, then panic. - expected = expect_type_id, - - /// Convert this `ItemId` into a `TypeId` without actually checking whether - /// this id actually points to a `Type`. - unchecked = as_type_id_unchecked; -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Module`. - pub struct ModuleId(ItemId) - where - /// Convert this `ItemId` into a `ModuleId` if its associated item is a - /// module, otherwise return `None`. - checked = as_module_id with is_module, - - /// Convert this `ItemId` into a `ModuleId`. - /// - /// If this `ItemId` does not point to a module, then panic. 
- expected = expect_module_id, - - /// Convert this `ItemId` into a `ModuleId` without actually checking - /// whether this id actually points to a `Module`. - unchecked = as_module_id_unchecked; -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Var`. - pub struct VarId(ItemId) - where - /// Convert this `ItemId` into a `VarId` if its associated item is a var, - /// otherwise return `None`. - checked = as_var_id with is_var, - - /// Convert this `ItemId` into a `VarId`. - /// - /// If this `ItemId` does not point to a var, then panic. - expected = expect_var_id, - - /// Convert this `ItemId` into a `VarId` without actually checking whether - /// this id actually points to a `Var`. - unchecked = as_var_id_unchecked; -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Function`. - pub struct FunctionId(ItemId) - where - /// Convert this `ItemId` into a `FunctionId` if its associated item is a function, - /// otherwise return `None`. - checked = as_function_id with is_function, - - /// Convert this `ItemId` into a `FunctionId`. - /// - /// If this `ItemId` does not point to a function, then panic. - expected = expect_function_id, - - /// Convert this `ItemId` into a `FunctionId` without actually checking whether - /// this id actually points to a `Function`. - unchecked = as_function_id_unchecked; -} - -impl From for usize { - fn from(id: ItemId) -> usize { - id.0 - } -} - -impl ItemId { - /// Get a numeric representation of this id. 
- pub fn as_usize(&self) -> usize { - (*self).into() - } -} - -impl ::std::cmp::PartialEq for ItemId -where - T: Copy + Into, -{ - fn eq(&self, rhs: &T) -> bool { - let rhs: ItemId = (*rhs).into(); - self.0 == rhs.0 - } -} - -impl CanDeriveDebug for T -where - T: Copy + Into, -{ - fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_debug && ctx.lookup_can_derive_debug(*self) - } -} - -impl CanDeriveDefault for T -where - T: Copy + Into, -{ - fn can_derive_default(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_default && ctx.lookup_can_derive_default(*self) - } -} - -impl CanDeriveCopy for T -where - T: Copy + Into, -{ - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_copy && ctx.lookup_can_derive_copy(*self) - } -} - -impl CanDeriveHash for T -where - T: Copy + Into, -{ - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_hash && ctx.lookup_can_derive_hash(*self) - } -} - -impl CanDerivePartialOrd for T -where - T: Copy + Into, -{ - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_partialord && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes - } -} - -impl CanDerivePartialEq for T -where - T: Copy + Into, -{ - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_partialeq && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes - } -} - -impl CanDeriveEq for T -where - T: Copy + Into, -{ - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_eq && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes && - !ctx.lookup_has_float(*self) - } -} - -impl CanDeriveOrd for T -where - T: Copy + Into, -{ - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_ord && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes && - !ctx.lookup_has_float(*self) - } -} 
- -/// A key used to index a resolved type, so we only process it once. -/// -/// This is almost always a USR string (an unique identifier generated by -/// clang), but it can also be the canonical declaration if the type is unnamed, -/// in which case clang may generate the same USR for multiple nested unnamed -/// types. -#[derive(Eq, PartialEq, Hash, Debug)] -enum TypeKey { - Usr(String), - Declaration(Cursor), -} - -/// A context used during parsing and generation of structs. -#[derive(Debug)] -pub struct BindgenContext { - /// The map of all the items parsed so far, keyed off ItemId. - items: Vec>, - - /// Clang USR to type map. This is needed to be able to associate types with - /// item ids during parsing. - types: HashMap, - - /// Maps from a cursor to the item id of the named template type parameter - /// for that cursor. - type_params: HashMap, - - /// A cursor to module map. Similar reason than above. - modules: HashMap, - - /// The root module, this is guaranteed to be an item of kind Module. - root_module: ModuleId, - - /// Current module being traversed. - current_module: ModuleId, - - /// A HashMap keyed on a type definition, and whose value is the parent id - /// of the declaration. - /// - /// This is used to handle the cases where the semantic and the lexical - /// parents of the cursor differ, like when a nested class is defined - /// outside of the parent class. - semantic_parents: HashMap, - - /// A stack with the current type declarations and types we're parsing. This - /// is needed to avoid infinite recursion when parsing a type like: - /// - /// struct c { struct c* next; }; - /// - /// This means effectively, that a type has a potential ID before knowing if - /// it's a correct type. But that's not important in practice. - /// - /// We could also use the `types` HashMap, but my intention with it is that - /// only valid types and declarations end up there, and this could - /// potentially break that assumption. 
- currently_parsed_types: Vec, - - /// A map with all the already parsed macro names. This is done to avoid - /// hard errors while parsing duplicated macros, as well to allow macro - /// expression parsing. - /// - /// This needs to be an std::HashMap because the cexpr API requires it. - parsed_macros: StdHashMap, cexpr::expr::EvalResult>, - - /// A set of all the included filenames. - deps: BTreeSet, - - /// The active replacements collected from replaces="xxx" annotations. - replacements: HashMap, ItemId>, - - collected_typerefs: bool, - - in_codegen: bool, - - /// The clang index for parsing. - index: clang::Index, - - /// The translation unit for parsing. - translation_unit: clang::TranslationUnit, - - /// Target information that can be useful for some stuff. - target_info: Option, - - /// The options given by the user via cli or other medium. - options: BindgenOptions, - - /// Whether a bindgen complex was generated - generated_bindgen_complex: Cell, - - /// The set of `ItemId`s that are allowlisted. This the very first thing - /// computed after parsing our IR, and before running any of our analyses. - allowlisted: Option, - - /// Cache for calls to `ParseCallbacks::blocklisted_type_implements_trait` - blocklisted_types_implement_traits: - RefCell>>, - - /// The set of `ItemId`s that are allowlisted for code generation _and_ that - /// we should generate accounting for the codegen options. - /// - /// It's computed right after computing the allowlisted items. - codegen_items: Option, - - /// Map from an item's id to the set of template parameter items that it - /// uses. See `ir::named` for more details. Always `Some` during the codegen - /// phase. - used_template_parameters: Option>, - - /// The set of `TypeKind::Comp` items found during parsing that need their - /// bitfield allocation units computed. Drained in `compute_bitfield_units`. - need_bitfield_allocation: Vec, - - /// The set of (`ItemId`s of) types that can't derive debug. 
- /// - /// This is populated when we enter codegen by `compute_cannot_derive_debug` - /// and is always `None` before that and `Some` after. - cannot_derive_debug: Option>, - - /// The set of (`ItemId`s of) types that can't derive default. - /// - /// This is populated when we enter codegen by `compute_cannot_derive_default` - /// and is always `None` before that and `Some` after. - cannot_derive_default: Option>, - - /// The set of (`ItemId`s of) types that can't derive copy. - /// - /// This is populated when we enter codegen by `compute_cannot_derive_copy` - /// and is always `None` before that and `Some` after. - cannot_derive_copy: Option>, - - /// The set of (`ItemId`s of) types that can't derive copy in array. - /// - /// This is populated when we enter codegen by `compute_cannot_derive_copy` - /// and is always `None` before that and `Some` after. - cannot_derive_copy_in_array: Option>, - - /// The set of (`ItemId`s of) types that can't derive hash. - /// - /// This is populated when we enter codegen by `compute_can_derive_hash` - /// and is always `None` before that and `Some` after. - cannot_derive_hash: Option>, - - /// The map why specified `ItemId`s of) types that can't derive hash. - /// - /// This is populated when we enter codegen by - /// `compute_cannot_derive_partialord_partialeq_or_eq` and is always `None` - /// before that and `Some` after. - cannot_derive_partialeq_or_partialord: Option>, - - /// The sizedness of types. - /// - /// This is populated by `compute_sizedness` and is always `None` before - /// that function is invoked and `Some` afterwards. - sizedness: Option>, - - /// The set of (`ItemId's of`) types that has vtable. - /// - /// Populated when we enter codegen by `compute_has_vtable`; always `None` - /// before that and `Some` after. - have_vtable: Option>, - - /// The set of (`ItemId's of`) types that has destructor. 
- /// - /// Populated when we enter codegen by `compute_has_destructor`; always `None` - /// before that and `Some` after. - have_destructor: Option>, - - /// The set of (`ItemId's of`) types that has array. - /// - /// Populated when we enter codegen by `compute_has_type_param_in_array`; always `None` - /// before that and `Some` after. - has_type_param_in_array: Option>, - - /// The set of (`ItemId's of`) types that has float. - /// - /// Populated when we enter codegen by `compute_has_float`; always `None` - /// before that and `Some` after. - has_float: Option>, -} - -/// A traversal of allowlisted items. -struct AllowlistedItemsTraversal<'ctx> { - ctx: &'ctx BindgenContext, - traversal: ItemTraversal< - 'ctx, - ItemSet, - Vec, - for<'a> fn(&'a BindgenContext, Edge) -> bool, - >, -} - -impl<'ctx> Iterator for AllowlistedItemsTraversal<'ctx> { - type Item = ItemId; - - fn next(&mut self) -> Option { - loop { - let id = self.traversal.next()?; - - if self.ctx.resolve_item(id).is_blocklisted(self.ctx) { - continue; - } - - return Some(id); - } - } -} - -impl<'ctx> AllowlistedItemsTraversal<'ctx> { - /// Construct a new allowlisted items traversal. - pub fn new( - ctx: &'ctx BindgenContext, - roots: R, - predicate: for<'a> fn(&'a BindgenContext, Edge) -> bool, - ) -> Self - where - R: IntoIterator, - { - AllowlistedItemsTraversal { - ctx, - traversal: ItemTraversal::new(ctx, roots, predicate), - } - } -} - -impl BindgenContext { - /// Construct the context for the given `options`. - pub(crate) fn new(options: BindgenOptions) -> Self { - // TODO(emilio): Use the CXTargetInfo here when available. 
- // - // see: https://reviews.llvm.org/D32389 - let index = clang::Index::new(false, true); - - let parse_options = - clang_sys::CXTranslationUnit_DetailedPreprocessingRecord; - - let translation_unit = { - let _t = - Timer::new("translation_unit").with_output(options.time_phases); - - clang::TranslationUnit::parse( - &index, - "", - &options.clang_args, - &options.input_unsaved_files, - parse_options, - ).expect("libclang error; possible causes include: -- Invalid flag syntax -- Unrecognized flags -- Invalid flag arguments -- File I/O errors -- Host vs. target architecture mismatch -If you encounter an error missing from this list, please file an issue or a PR!") - }; - - let target_info = clang::TargetInfo::new(&translation_unit); - let root_module = Self::build_root_module(ItemId(0)); - let root_module_id = root_module.id().as_module_id_unchecked(); - - // depfiles need to include the explicitly listed headers too - let mut deps = BTreeSet::default(); - if let Some(filename) = &options.input_header { - deps.insert(filename.clone()); - } - deps.extend(options.extra_input_headers.iter().cloned()); - - BindgenContext { - items: vec![Some(root_module)], - deps, - types: Default::default(), - type_params: Default::default(), - modules: Default::default(), - root_module: root_module_id, - current_module: root_module_id, - semantic_parents: Default::default(), - currently_parsed_types: vec![], - parsed_macros: Default::default(), - replacements: Default::default(), - collected_typerefs: false, - in_codegen: false, - index, - translation_unit, - target_info, - options, - generated_bindgen_complex: Cell::new(false), - allowlisted: None, - blocklisted_types_implement_traits: Default::default(), - codegen_items: None, - used_template_parameters: None, - need_bitfield_allocation: Default::default(), - cannot_derive_debug: None, - cannot_derive_default: None, - cannot_derive_copy: None, - cannot_derive_copy_in_array: None, - cannot_derive_hash: None, - 
cannot_derive_partialeq_or_partialord: None, - sizedness: None, - have_vtable: None, - have_destructor: None, - has_type_param_in_array: None, - has_float: None, - } - } - - /// Returns `true` if the target architecture is wasm32 - pub fn is_target_wasm32(&self) -> bool { - match self.target_info { - Some(ref ti) => ti.triple.starts_with("wasm32-"), - None => false, - } - } - - /// Creates a timer for the current bindgen phase. If time_phases is `true`, - /// the timer will print to stderr when it is dropped, otherwise it will do - /// nothing. - pub fn timer<'a>(&self, name: &'a str) -> Timer<'a> { - Timer::new(name).with_output(self.options.time_phases) - } - - /// Returns the pointer width to use for the target for the current - /// translation. - pub fn target_pointer_size(&self) -> usize { - if let Some(ref ti) = self.target_info { - return ti.pointer_width / 8; - } - mem::size_of::<*mut ()>() - } - - /// Get the stack of partially parsed types that we are in the middle of - /// parsing. - pub fn currently_parsed_types(&self) -> &[PartialType] { - &self.currently_parsed_types[..] - } - - /// Begin parsing the given partial type, and push it onto the - /// `currently_parsed_types` stack so that we won't infinite recurse if we - /// run into a reference to it while parsing it. - pub fn begin_parsing(&mut self, partial_ty: PartialType) { - self.currently_parsed_types.push(partial_ty); - } - - /// Finish parsing the current partial type, pop it off the - /// `currently_parsed_types` stack, and return it. - pub fn finish_parsing(&mut self) -> PartialType { - self.currently_parsed_types.pop().expect( - "should have been parsing a type, if we finished parsing a type", - ) - } - - /// Get the user-provided callbacks by reference, if any. - pub fn parse_callbacks(&self) -> Option<&dyn ParseCallbacks> { - self.options().parse_callbacks.as_deref() - } - - /// Add another path to the set of included files. 
- pub fn include_file(&mut self, filename: String) { - if let Some(cbs) = self.parse_callbacks() { - cbs.include_file(&filename); - } - self.deps.insert(filename); - } - - /// Get any included files. - pub fn deps(&self) -> &BTreeSet { - &self.deps - } - - /// Define a new item. - /// - /// This inserts it into the internal items set, and its type into the - /// internal types set. - pub fn add_item( - &mut self, - item: Item, - declaration: Option, - location: Option, - ) { - debug!( - "BindgenContext::add_item({:?}, declaration: {:?}, loc: {:?}", - item, declaration, location - ); - debug_assert!( - declaration.is_some() || - !item.kind().is_type() || - item.kind().expect_type().is_builtin_or_type_param() || - item.kind().expect_type().is_opaque(self, &item) || - item.kind().expect_type().is_unresolved_ref(), - "Adding a type without declaration?" - ); - - let id = item.id(); - let is_type = item.kind().is_type(); - let is_unnamed = is_type && item.expect_type().name().is_none(); - let is_template_instantiation = - is_type && item.expect_type().is_template_instantiation(); - - if item.id() != self.root_module { - self.add_item_to_module(&item); - } - - if is_type && item.expect_type().is_comp() { - self.need_bitfield_allocation.push(id); - } - - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!( - old_item.is_none(), - "should not have already associated an item with the given id" - ); - - // Unnamed items can have an USR, but they can't be referenced from - // other sites explicitly and the USR can match if the unnamed items are - // nested, so don't bother tracking them. 
- if !is_type || is_template_instantiation { - return; - } - if let Some(mut declaration) = declaration { - if !declaration.is_valid() { - if let Some(location) = location { - if location.is_template_like() { - declaration = location; - } - } - } - declaration = declaration.canonical(); - if !declaration.is_valid() { - // This could happen, for example, with types like `int*` or - // similar. - // - // Fortunately, we don't care about those types being - // duplicated, so we can just ignore them. - debug!( - "Invalid declaration {:?} found for type {:?}", - declaration, - self.resolve_item_fallible(id) - .unwrap() - .kind() - .expect_type() - ); - return; - } - - let key = if is_unnamed { - TypeKey::Declaration(declaration) - } else if let Some(usr) = declaration.usr() { - TypeKey::Usr(usr) - } else { - warn!( - "Valid declaration with no USR: {:?}, {:?}", - declaration, location - ); - TypeKey::Declaration(declaration) - }; - - let old = self.types.insert(key, id.as_type_id_unchecked()); - debug_assert_eq!(old, None); - } - } - - /// Ensure that every item (other than the root module) is in a module's - /// children list. This is to make sure that every allowlisted item get's - /// codegen'd, even if its parent is not allowlisted. See issue #769 for - /// details. 
- fn add_item_to_module(&mut self, item: &Item) { - assert!(item.id() != self.root_module); - assert!(self.resolve_item_fallible(item.id()).is_none()); - - if let Some(ref mut parent) = self.items[item.parent_id().0] { - if let Some(module) = parent.as_module_mut() { - debug!( - "add_item_to_module: adding {:?} as child of parent module {:?}", - item.id(), - item.parent_id() - ); - - module.children_mut().insert(item.id()); - return; - } - } - - debug!( - "add_item_to_module: adding {:?} as child of current module {:?}", - item.id(), - self.current_module - ); - - self.items[(self.current_module.0).0] - .as_mut() - .expect("Should always have an item for self.current_module") - .as_module_mut() - .expect("self.current_module should always be a module") - .children_mut() - .insert(item.id()); - } - - /// Add a new named template type parameter to this context's item set. - pub fn add_type_param(&mut self, item: Item, definition: clang::Cursor) { - debug!( - "BindgenContext::add_type_param: item = {:?}; definition = {:?}", - item, definition - ); - - assert!( - item.expect_type().is_type_param(), - "Should directly be a named type, not a resolved reference or anything" - ); - assert_eq!( - definition.kind(), - clang_sys::CXCursor_TemplateTypeParameter - ); - - self.add_item_to_module(&item); - - let id = item.id(); - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!( - old_item.is_none(), - "should not have already associated an item with the given id" - ); - - let old_named_ty = self - .type_params - .insert(definition, id.as_type_id_unchecked()); - assert!( - old_named_ty.is_none(), - "should not have already associated a named type with this id" - ); - } - - /// Get the named type defined at the given cursor location, if we've - /// already added one. 
- pub fn get_type_param(&self, definition: &clang::Cursor) -> Option { - assert_eq!( - definition.kind(), - clang_sys::CXCursor_TemplateTypeParameter - ); - self.type_params.get(definition).cloned() - } - - // TODO: Move all this syntax crap to other part of the code. - - /// Mangles a name so it doesn't conflict with any keyword. - #[rustfmt::skip] - pub fn rust_mangle<'a>(&self, name: &'a str) -> Cow<'a, str> { - if name.contains('@') || - name.contains('?') || - name.contains('$') || - matches!( - name, - "abstract" | "alignof" | "as" | "async" | "become" | - "box" | "break" | "const" | "continue" | "crate" | "do" | - "dyn" | "else" | "enum" | "extern" | "false" | "final" | - "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | - "macro" | "match" | "mod" | "move" | "mut" | "offsetof" | - "override" | "priv" | "proc" | "pub" | "pure" | "ref" | - "return" | "Self" | "self" | "sizeof" | "static" | - "struct" | "super" | "trait" | "true" | "try" | "type" | "typeof" | - "unsafe" | "unsized" | "use" | "virtual" | "where" | - "while" | "yield" | "str" | "bool" | "f32" | "f64" | - "usize" | "isize" | "u128" | "i128" | "u64" | "i64" | - "u32" | "i32" | "u16" | "i16" | "u8" | "i8" | "_" - ) - { - let mut s = name.to_owned(); - s = s.replace("@", "_"); - s = s.replace("?", "_"); - s = s.replace("$", "_"); - s.push('_'); - return Cow::Owned(s); - } - Cow::Borrowed(name) - } - - /// Returns a mangled name as a rust identifier. - pub fn rust_ident(&self, name: S) -> Ident - where - S: AsRef, - { - self.rust_ident_raw(self.rust_mangle(name.as_ref())) - } - - /// Returns a mangled name as a rust identifier. - pub fn rust_ident_raw(&self, name: T) -> Ident - where - T: AsRef, - { - Ident::new(name.as_ref(), Span::call_site()) - } - - /// Iterate over all items that have been defined. 
- pub fn items(&self) -> impl Iterator { - self.items.iter().enumerate().filter_map(|(index, item)| { - let item = item.as_ref()?; - Some((ItemId(index), item)) - }) - } - - /// Have we collected all unresolved type references yet? - pub fn collected_typerefs(&self) -> bool { - self.collected_typerefs - } - - /// Gather all the unresolved type references. - fn collect_typerefs( - &mut self, - ) -> Vec<(ItemId, clang::Type, clang::Cursor, Option)> { - debug_assert!(!self.collected_typerefs); - self.collected_typerefs = true; - let mut typerefs = vec![]; - - for (id, item) in self.items() { - let kind = item.kind(); - let ty = match kind.as_type() { - Some(ty) => ty, - None => continue, - }; - - if let TypeKind::UnresolvedTypeRef(ref ty, loc, parent_id) = - *ty.kind() - { - typerefs.push((id, *ty, loc, parent_id)); - }; - } - typerefs - } - - /// Collect all of our unresolved type references and resolve them. - fn resolve_typerefs(&mut self) { - let _t = self.timer("resolve_typerefs"); - - let typerefs = self.collect_typerefs(); - - for (id, ty, loc, parent_id) in typerefs { - let _resolved = - { - let resolved = Item::from_ty(&ty, loc, parent_id, self) - .unwrap_or_else(|_| { - warn!("Could not resolve type reference, falling back \ - to opaque blob"); - Item::new_opaque_type(self.next_item_id(), &ty, self) - }); - - let item = self.items[id.0].as_mut().unwrap(); - *item.kind_mut().as_type_mut().unwrap().kind_mut() = - TypeKind::ResolvedTypeRef(resolved); - resolved - }; - - // Something in the STL is trolling me. I don't need this assertion - // right now, but worth investigating properly once this lands. - // - // debug_assert!(self.items.get(&resolved).is_some(), "How?"); - // - // if let Some(parent_id) = parent_id { - // assert_eq!(self.items[&resolved].parent_id(), parent_id); - // } - } - } - - /// Temporarily loan `Item` with the given `ItemId`. This provides means to - /// mutably borrow `Item` while having a reference to `BindgenContext`. 
- /// - /// `Item` with the given `ItemId` is removed from the context, given - /// closure is executed and then `Item` is placed back. - /// - /// # Panics - /// - /// Panics if attempt to resolve given `ItemId` inside the given - /// closure is made. - fn with_loaned_item(&mut self, id: ItemId, f: F) -> T - where - F: (FnOnce(&BindgenContext, &mut Item) -> T), - { - let mut item = self.items[id.0].take().unwrap(); - - let result = f(self, &mut item); - - let existing = mem::replace(&mut self.items[id.0], Some(item)); - assert!(existing.is_none()); - - result - } - - /// Compute the bitfield allocation units for all `TypeKind::Comp` items we - /// parsed. - fn compute_bitfield_units(&mut self) { - let _t = self.timer("compute_bitfield_units"); - - assert!(self.collected_typerefs()); - - let need_bitfield_allocation = - mem::take(&mut self.need_bitfield_allocation); - for id in need_bitfield_allocation { - self.with_loaned_item(id, |ctx, item| { - let ty = item.kind_mut().as_type_mut().unwrap(); - let layout = ty.layout(ctx); - ty.as_comp_mut() - .unwrap() - .compute_bitfield_units(ctx, layout.as_ref()); - }); - } - } - - /// Assign a new generated name for each anonymous field. - fn deanonymize_fields(&mut self) { - let _t = self.timer("deanonymize_fields"); - - let comp_item_ids: Vec = self - .items() - .filter_map(|(id, item)| { - if item.kind().as_type()?.is_comp() { - return Some(id); - } - None - }) - .collect(); - - for id in comp_item_ids { - self.with_loaned_item(id, |ctx, item| { - item.kind_mut() - .as_type_mut() - .unwrap() - .as_comp_mut() - .unwrap() - .deanonymize_fields(ctx); - }); - } - } - - /// Iterate over all items and replace any item that has been named in a - /// `replaces="SomeType"` annotation with the replacement type. 
- fn process_replacements(&mut self) { - let _t = self.timer("process_replacements"); - if self.replacements.is_empty() { - debug!("No replacements to process"); - return; - } - - // FIXME: This is linear, but the replaces="xxx" annotation was already - // there, and for better or worse it's useful, sigh... - // - // We leverage the ResolvedTypeRef thing, though, which is cool :P. - - let mut replacements = vec![]; - - for (id, item) in self.items() { - if item.annotations().use_instead_of().is_some() { - continue; - } - - // Calls to `canonical_name` are expensive, so eagerly filter out - // items that cannot be replaced. - let ty = match item.kind().as_type() { - Some(ty) => ty, - None => continue, - }; - - match *ty.kind() { - TypeKind::Comp(..) | - TypeKind::TemplateAlias(..) | - TypeKind::Enum(..) | - TypeKind::Alias(..) => {} - _ => continue, - } - - let path = item.path_for_allowlisting(self); - let replacement = self.replacements.get(&path[1..]); - - if let Some(replacement) = replacement { - if *replacement != id { - // We set this just after parsing the annotation. It's - // very unlikely, but this can happen. - if self.resolve_item_fallible(*replacement).is_some() { - replacements.push(( - id.expect_type_id(self), - replacement.expect_type_id(self), - )); - } - } - } - } - - for (id, replacement_id) in replacements { - debug!("Replacing {:?} with {:?}", id, replacement_id); - let new_parent = { - let item_id: ItemId = id.into(); - let item = self.items[item_id.0].as_mut().unwrap(); - *item.kind_mut().as_type_mut().unwrap().kind_mut() = - TypeKind::ResolvedTypeRef(replacement_id); - item.parent_id() - }; - - // Relocate the replacement item from where it was declared, to - // where the thing it is replacing was declared. - // - // First, we'll make sure that its parent id is correct. - - let old_parent = self.resolve_item(replacement_id).parent_id(); - if new_parent == old_parent { - // Same parent and therefore also same containing - // module. 
Nothing to do here. - continue; - } - - let replacement_item_id: ItemId = replacement_id.into(); - self.items[replacement_item_id.0] - .as_mut() - .unwrap() - .set_parent_for_replacement(new_parent); - - // Second, make sure that it is in the correct module's children - // set. - - let old_module = { - let immut_self = &*self; - old_parent - .ancestors(immut_self) - .chain(Some(immut_self.root_module.into())) - .find(|id| { - let item = immut_self.resolve_item(*id); - item.as_module().map_or(false, |m| { - m.children().contains(&replacement_id.into()) - }) - }) - }; - let old_module = old_module - .expect("Every replacement item should be in a module"); - - let new_module = { - let immut_self = &*self; - new_parent - .ancestors(immut_self) - .find(|id| immut_self.resolve_item(*id).is_module()) - }; - let new_module = - new_module.unwrap_or_else(|| self.root_module.into()); - - if new_module == old_module { - // Already in the correct module. - continue; - } - - self.items[old_module.0] - .as_mut() - .unwrap() - .as_module_mut() - .unwrap() - .children_mut() - .remove(&replacement_id.into()); - - self.items[new_module.0] - .as_mut() - .unwrap() - .as_module_mut() - .unwrap() - .children_mut() - .insert(replacement_id.into()); - } - } - - /// Enter the code generation phase, invoke the given callback `cb`, and - /// leave the code generation phase. - pub(crate) fn gen(mut self, cb: F) -> (Out, BindgenOptions) - where - F: FnOnce(&Self) -> Out, - { - self.in_codegen = true; - - self.resolve_typerefs(); - self.compute_bitfield_units(); - self.process_replacements(); - - self.deanonymize_fields(); - - self.assert_no_dangling_references(); - - // Compute the allowlisted set after processing replacements and - // resolving type refs, as those are the final mutations of the IR - // graph, and their completion means that the IR graph is now frozen. 
- self.compute_allowlisted_and_codegen_items(); - - // Make sure to do this after processing replacements, since that messes - // with the parentage and module children, and we want to assert that it - // messes with them correctly. - self.assert_every_item_in_a_module(); - - self.compute_has_vtable(); - self.compute_sizedness(); - self.compute_has_destructor(); - self.find_used_template_parameters(); - self.compute_cannot_derive_debug(); - self.compute_cannot_derive_default(); - self.compute_cannot_derive_copy(); - self.compute_has_type_param_in_array(); - self.compute_has_float(); - self.compute_cannot_derive_hash(); - self.compute_cannot_derive_partialord_partialeq_or_eq(); - - let ret = cb(&self); - (ret, self.options) - } - - /// When the `testing_only_extra_assertions` feature is enabled, this - /// function walks the IR graph and asserts that we do not have any edges - /// referencing an ItemId for which we do not have an associated IR item. - fn assert_no_dangling_references(&self) { - if cfg!(feature = "testing_only_extra_assertions") { - for _ in self.assert_no_dangling_item_traversal() { - // The iterator's next method does the asserting for us. - } - } - } - - fn assert_no_dangling_item_traversal( - &self, - ) -> traversal::AssertNoDanglingItemsTraversal { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - - let roots = self.items().map(|(id, _)| id); - traversal::AssertNoDanglingItemsTraversal::new( - self, - roots, - traversal::all_edges, - ) - } - - /// When the `testing_only_extra_assertions` feature is enabled, walk over - /// every item and ensure that it is in the children set of one of its - /// module ancestors. 
- fn assert_every_item_in_a_module(&self) { - if cfg!(feature = "testing_only_extra_assertions") { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - - for (id, _item) in self.items() { - if id == self.root_module { - continue; - } - - assert!( - { - let id = id - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self) - .id(); - id.ancestors(self) - .chain(Some(self.root_module.into())) - .any(|ancestor| { - debug!( - "Checking if {:?} is a child of {:?}", - id, ancestor - ); - self.resolve_item(ancestor) - .as_module() - .map_or(false, |m| { - m.children().contains(&id) - }) - }) - }, - "{:?} should be in some ancestor module's children set", - id - ); - } - } - } - - /// Compute for every type whether it is sized or not, and whether it is - /// sized or not as a base class. - fn compute_sizedness(&mut self) { - let _t = self.timer("compute_sizedness"); - assert!(self.sizedness.is_none()); - self.sizedness = Some(analyze::(self)); - } - - /// Look up whether the type with the given id is sized or not. - pub fn lookup_sizedness(&self, id: TypeId) -> SizednessResult { - assert!( - self.in_codegen_phase(), - "We only compute sizedness after we've entered codegen" - ); - - self.sizedness - .as_ref() - .unwrap() - .get(&id) - .cloned() - .unwrap_or(SizednessResult::ZeroSized) - } - - /// Compute whether the type has vtable. - fn compute_has_vtable(&mut self) { - let _t = self.timer("compute_has_vtable"); - assert!(self.have_vtable.is_none()); - self.have_vtable = Some(analyze::(self)); - } - - /// Look up whether the item with `id` has vtable or not. - pub fn lookup_has_vtable(&self, id: TypeId) -> HasVtableResult { - assert!( - self.in_codegen_phase(), - "We only compute vtables when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has a - // vtable or not. 
- self.have_vtable - .as_ref() - .unwrap() - .get(&id.into()) - .cloned() - .unwrap_or(HasVtableResult::No) - } - - /// Compute whether the type has a destructor. - fn compute_has_destructor(&mut self) { - let _t = self.timer("compute_has_destructor"); - assert!(self.have_destructor.is_none()); - self.have_destructor = Some(analyze::(self)); - } - - /// Look up whether the item with `id` has a destructor. - pub fn lookup_has_destructor(&self, id: TypeId) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute destructors when we enter codegen" - ); - - self.have_destructor.as_ref().unwrap().contains(&id.into()) - } - - fn find_used_template_parameters(&mut self) { - let _t = self.timer("find_used_template_parameters"); - if self.options.allowlist_recursively { - let used_params = analyze::(self); - self.used_template_parameters = Some(used_params); - } else { - // If you aren't recursively allowlisting, then we can't really make - // any sense of template parameter usage, and you're on your own. - let mut used_params = HashMap::default(); - for &id in self.allowlisted_items() { - used_params.entry(id).or_insert_with(|| { - id.self_template_params(self) - .into_iter() - .map(|p| p.into()) - .collect() - }); - } - self.used_template_parameters = Some(used_params); - } - } - - /// Return `true` if `item` uses the given `template_param`, `false` - /// otherwise. - /// - /// This method may only be called during the codegen phase, because the - /// template usage information is only computed as we enter the codegen - /// phase. - /// - /// If the item is blocklisted, then we say that it always uses the template - /// parameter. This is a little subtle. The template parameter usage - /// analysis only considers allowlisted items, and if any blocklisted item - /// shows up in the generated bindings, it is the user's responsibility to - /// manually provide a definition for them. 
To give them the most - /// flexibility when doing that, we assume that they use every template - /// parameter and always pass template arguments through in instantiations. - pub fn uses_template_parameter( - &self, - item: ItemId, - template_param: TypeId, - ) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute template parameter usage as we enter codegen" - ); - - if self.resolve_item(item).is_blocklisted(self) { - return true; - } - - let template_param = template_param - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self) - .id(); - - self.used_template_parameters - .as_ref() - .expect("should have found template parameter usage if we're in codegen") - .get(&item) - .map_or(false, |items_used_params| items_used_params.contains(&template_param)) - } - - /// Return `true` if `item` uses any unbound, generic template parameters, - /// `false` otherwise. - /// - /// Has the same restrictions that `uses_template_parameter` has. - pub fn uses_any_template_parameters(&self, item: ItemId) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute template parameter usage as we enter codegen" - ); - - self.used_template_parameters - .as_ref() - .expect( - "should have template parameter usage info in codegen phase", - ) - .get(&item) - .map_or(false, |used| !used.is_empty()) - } - - // This deserves a comment. Builtin types don't get a valid declaration, so - // we can't add it to the cursor->type map. - // - // That being said, they're not generated anyway, and are few, so the - // duplication and special-casing is fine. - // - // If at some point we care about the memory here, probably a map TypeKind - // -> builtin type ItemId would be the best to improve that. 
- fn add_builtin_item(&mut self, item: Item) { - debug!("add_builtin_item: item = {:?}", item); - debug_assert!(item.kind().is_type()); - self.add_item_to_module(&item); - let id = item.id(); - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!(old_item.is_none(), "Inserted type twice?"); - } - - fn build_root_module(id: ItemId) -> Item { - let module = Module::new(Some("root".into()), ModuleKind::Normal); - Item::new(id, None, None, id, ItemKind::Module(module), None) - } - - /// Get the root module. - pub fn root_module(&self) -> ModuleId { - self.root_module - } - - /// Resolve a type with the given id. - /// - /// Panics if there is no item for the given `TypeId` or if the resolved - /// item is not a `Type`. - pub fn resolve_type(&self, type_id: TypeId) -> &Type { - self.resolve_item(type_id).kind().expect_type() - } - - /// Resolve a function with the given id. - /// - /// Panics if there is no item for the given `FunctionId` or if the resolved - /// item is not a `Function`. - pub fn resolve_func(&self, func_id: FunctionId) -> &Function { - self.resolve_item(func_id).kind().expect_function() - } - - /// Resolve the given `ItemId` as a type, or `None` if there is no item with - /// the given id. - /// - /// Panics if the id resolves to an item that is not a type. - pub fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> { - self.resolve_item_fallible(type_id) - .map(|t| t.kind().expect_type()) - } - - /// Resolve the given `ItemId` into an `Item`, or `None` if no such item - /// exists. - pub fn resolve_item_fallible>( - &self, - id: Id, - ) -> Option<&Item> { - self.items.get(id.into().0)?.as_ref() - } - - /// Resolve the given `ItemId` into an `Item`. - /// - /// Panics if the given id does not resolve to any item. 
- pub fn resolve_item>(&self, item_id: Id) -> &Item { - let item_id = item_id.into(); - match self.resolve_item_fallible(item_id) { - Some(item) => item, - None => panic!("Not an item: {:?}", item_id), - } - } - - /// Get the current module. - pub fn current_module(&self) -> ModuleId { - self.current_module - } - - /// Add a semantic parent for a given type definition. - /// - /// We do this from the type declaration, in order to be able to find the - /// correct type definition afterwards. - /// - /// TODO(emilio): We could consider doing this only when - /// declaration.lexical_parent() != definition.lexical_parent(), but it's - /// not sure it's worth it. - pub fn add_semantic_parent( - &mut self, - definition: clang::Cursor, - parent_id: ItemId, - ) { - self.semantic_parents.insert(definition, parent_id); - } - - /// Returns a known semantic parent for a given definition. - pub fn known_semantic_parent( - &self, - definition: clang::Cursor, - ) -> Option { - self.semantic_parents.get(&definition).cloned() - } - - /// Given a cursor pointing to the location of a template instantiation, - /// return a tuple of the form `(declaration_cursor, declaration_id, - /// num_expected_template_args)`. - /// - /// Note that `declaration_id` is not guaranteed to be in the context's item - /// set! It is possible that it is a partial type that we are still in the - /// middle of parsing. 
- fn get_declaration_info_for_template_instantiation( - &self, - instantiation: &Cursor, - ) -> Option<(Cursor, ItemId, usize)> { - instantiation - .cur_type() - .canonical_declaration(Some(instantiation)) - .and_then(|canon_decl| { - self.get_resolved_type(&canon_decl).and_then( - |template_decl_id| { - let num_params = - template_decl_id.num_self_template_params(self); - if num_params == 0 { - None - } else { - Some(( - *canon_decl.cursor(), - template_decl_id.into(), - num_params, - )) - } - }, - ) - }) - .or_else(|| { - // If we haven't already parsed the declaration of - // the template being instantiated, then it *must* - // be on the stack of types we are currently - // parsing. If it wasn't then clang would have - // already errored out before we started - // constructing our IR because you can't instantiate - // a template until it is fully defined. - instantiation - .referenced() - .and_then(|referenced| { - self.currently_parsed_types() - .iter() - .find(|partial_ty| *partial_ty.decl() == referenced) - .cloned() - }) - .and_then(|template_decl| { - let num_template_params = - template_decl.num_self_template_params(self); - if num_template_params == 0 { - None - } else { - Some(( - *template_decl.decl(), - template_decl.id(), - num_template_params, - )) - } - }) - }) - } - - /// Parse a template instantiation, eg `Foo`. - /// - /// This is surprisingly difficult to do with libclang, due to the fact that - /// it doesn't provide explicit template argument information, except for - /// function template declarations(!?!??!). - /// - /// The only way to do this is manually inspecting the AST and looking for - /// TypeRefs and TemplateRefs inside. This, unfortunately, doesn't work for - /// more complex cases, see the comment on the assertion below. - /// - /// To add insult to injury, the AST itself has structure that doesn't make - /// sense. Sometimes `Foo>` has an AST with nesting like you might - /// expect: `(Foo (Bar (int)))`. 
Other times, the AST we get is completely - /// flat: `(Foo Bar int)`. - /// - /// To see an example of what this method handles: - /// - /// ```c++ - /// template - /// class Incomplete { - /// T p; - /// }; - /// - /// template - /// class Foo { - /// Incomplete bar; - /// }; - /// ``` - /// - /// Finally, template instantiations are always children of the current - /// module. They use their template's definition for their name, so the - /// parent is only useful for ensuring that their layout tests get - /// codegen'd. - fn instantiate_template( - &mut self, - with_id: ItemId, - template: TypeId, - ty: &clang::Type, - location: clang::Cursor, - ) -> Option { - let num_expected_args = - self.resolve_type(template).num_self_template_params(self); - if num_expected_args == 0 { - warn!( - "Tried to instantiate a template for which we could not \ - determine any template parameters" - ); - return None; - } - - let mut args = vec![]; - let mut found_const_arg = false; - let mut children = location.collect_children(); - - if children.iter().all(|c| !c.has_children()) { - // This is insanity... If clang isn't giving us a properly nested - // AST for which template arguments belong to which template we are - // instantiating, we'll need to construct it ourselves. However, - // there is an extra `NamespaceRef, NamespaceRef, ..., TemplateRef` - // representing a reference to the outermost template declaration - // that we need to filter out of the children. We need to do this - // filtering because we already know which template declaration is - // being specialized via the `location`'s type, and if we do not - // filter it out, we'll add an extra layer of template instantiation - // on accident. 
- let idx = children - .iter() - .position(|c| c.kind() == clang_sys::CXCursor_TemplateRef); - if let Some(idx) = idx { - if children - .iter() - .take(idx) - .all(|c| c.kind() == clang_sys::CXCursor_NamespaceRef) - { - children = children.into_iter().skip(idx + 1).collect(); - } - } - } - - for child in children.iter().rev() { - match child.kind() { - clang_sys::CXCursor_TypeRef | - clang_sys::CXCursor_TypedefDecl | - clang_sys::CXCursor_TypeAliasDecl => { - // The `with_id` id will potentially end up unused if we give up - // on this type (for example, because it has const value - // template args), so if we pass `with_id` as the parent, it is - // potentially a dangling reference. Instead, use the canonical - // template declaration as the parent. It is already parsed and - // has a known-resolvable `ItemId`. - let ty = Item::from_ty_or_ref( - child.cur_type(), - *child, - Some(template.into()), - self, - ); - args.push(ty); - } - clang_sys::CXCursor_TemplateRef => { - let ( - template_decl_cursor, - template_decl_id, - num_expected_template_args, - ) = self.get_declaration_info_for_template_instantiation( - child, - )?; - - if num_expected_template_args == 0 || - child.has_at_least_num_children( - num_expected_template_args, - ) - { - // Do a happy little parse. See comment in the TypeRef - // match arm about parent IDs. - let ty = Item::from_ty_or_ref( - child.cur_type(), - *child, - Some(template.into()), - self, - ); - args.push(ty); - } else { - // This is the case mentioned in the doc comment where - // clang gives us a flattened AST and we have to - // reconstruct which template arguments go to which - // instantiation :( - let args_len = args.len(); - if args_len < num_expected_template_args { - warn!( - "Found a template instantiation without \ - enough template arguments" - ); - return None; - } - - let mut sub_args: Vec<_> = args - .drain(args_len - num_expected_template_args..) 
- .collect(); - sub_args.reverse(); - - let sub_name = Some(template_decl_cursor.spelling()); - let sub_inst = TemplateInstantiation::new( - // This isn't guaranteed to be a type that we've - // already finished parsing yet. - template_decl_id.as_type_id_unchecked(), - sub_args, - ); - let sub_kind = - TypeKind::TemplateInstantiation(sub_inst); - let sub_ty = Type::new( - sub_name, - template_decl_cursor - .cur_type() - .fallible_layout(self) - .ok(), - sub_kind, - false, - ); - let sub_id = self.next_item_id(); - let sub_item = Item::new( - sub_id, - None, - None, - self.current_module.into(), - ItemKind::Type(sub_ty), - Some(child.location()), - ); - - // Bypass all the validations in add_item explicitly. - debug!( - "instantiate_template: inserting nested \ - instantiation item: {:?}", - sub_item - ); - self.add_item_to_module(&sub_item); - debug_assert_eq!(sub_id, sub_item.id()); - self.items[sub_id.0] = Some(sub_item); - args.push(sub_id.as_type_id_unchecked()); - } - } - _ => { - warn!( - "Found template arg cursor we can't handle: {:?}", - child - ); - found_const_arg = true; - } - } - } - - if found_const_arg { - // This is a dependently typed template instantiation. That is, an - // instantiation of a template with one or more const values as - // template arguments, rather than only types as template - // arguments. For example, `Foo` versus `Bar`. - // We can't handle these instantiations, so just punt in this - // situation... - warn!( - "Found template instantiated with a const value; \ - bindgen can't handle this kind of template instantiation!" 
- ); - return None; - } - - if args.len() != num_expected_args { - warn!( - "Found a template with an unexpected number of template \ - arguments" - ); - return None; - } - - args.reverse(); - let type_kind = TypeKind::TemplateInstantiation( - TemplateInstantiation::new(template, args), - ); - let name = ty.spelling(); - let name = if name.is_empty() { None } else { Some(name) }; - let ty = Type::new( - name, - ty.fallible_layout(self).ok(), - type_kind, - ty.is_const(), - ); - let item = Item::new( - with_id, - None, - None, - self.current_module.into(), - ItemKind::Type(ty), - Some(location.location()), - ); - - // Bypass all the validations in add_item explicitly. - debug!("instantiate_template: inserting item: {:?}", item); - self.add_item_to_module(&item); - debug_assert_eq!(with_id, item.id()); - self.items[with_id.0] = Some(item); - Some(with_id.as_type_id_unchecked()) - } - - /// If we have already resolved the type for the given type declaration, - /// return its `ItemId`. Otherwise, return `None`. - pub fn get_resolved_type( - &self, - decl: &clang::CanonicalTypeDeclaration, - ) -> Option { - self.types - .get(&TypeKey::Declaration(*decl.cursor())) - .or_else(|| { - decl.cursor() - .usr() - .and_then(|usr| self.types.get(&TypeKey::Usr(usr))) - }) - .cloned() - } - - /// Looks up for an already resolved type, either because it's builtin, or - /// because we already have it in the map. 
- pub fn builtin_or_resolved_ty( - &mut self, - with_id: ItemId, - parent_id: Option, - ty: &clang::Type, - location: Option, - ) -> Option { - use clang_sys::{CXCursor_TypeAliasTemplateDecl, CXCursor_TypeRef}; - debug!( - "builtin_or_resolved_ty: {:?}, {:?}, {:?}, {:?}", - ty, location, with_id, parent_id - ); - - if let Some(decl) = ty.canonical_declaration(location.as_ref()) { - if let Some(id) = self.get_resolved_type(&decl) { - debug!( - "Already resolved ty {:?}, {:?}, {:?} {:?}", - id, decl, ty, location - ); - // If the declaration already exists, then either: - // - // * the declaration is a template declaration of some sort, - // and we are looking at an instantiation or specialization - // of it, or - // * we have already parsed and resolved this type, and - // there's nothing left to do. - if let Some(location) = location { - if decl.cursor().is_template_like() && - *ty != decl.cursor().cur_type() - { - // For specialized type aliases, there's no way to get the - // template parameters as of this writing (for a struct - // specialization we wouldn't be in this branch anyway). - // - // Explicitly return `None` if there aren't any - // unspecialized parameters (contains any `TypeRef`) so we - // resolve the canonical type if there is one and it's - // exposed. - // - // This is _tricky_, I know :( - if decl.cursor().kind() == - CXCursor_TypeAliasTemplateDecl && - !location.contains_cursor(CXCursor_TypeRef) && - ty.canonical_type().is_valid_and_exposed() - { - return None; - } - - return self - .instantiate_template(with_id, id, ty, location) - .or(Some(id)); - } - } - - return Some(self.build_ty_wrapper(with_id, id, parent_id, ty)); - } - } - - debug!("Not resolved, maybe builtin?"); - self.build_builtin_ty(ty) - } - - /// Make a new item that is a resolved type reference to the `wrapped_id`. - /// - /// This is unfortunately a lot of bloat, but is needed to properly track - /// constness et al. 
- /// - /// We should probably make the constness tracking separate, so it doesn't - /// bloat that much, but hey, we already bloat the heck out of builtin - /// types. - pub fn build_ty_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - ) -> TypeId { - self.build_wrapper(with_id, wrapped_id, parent_id, ty, ty.is_const()) - } - - /// A wrapper over a type that adds a const qualifier explicitly. - /// - /// Needed to handle const methods in C++, wrapping the type . - pub fn build_const_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - ) -> TypeId { - self.build_wrapper( - with_id, wrapped_id, parent_id, ty, /* is_const = */ true, - ) - } - - fn build_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - is_const: bool, - ) -> TypeId { - let spelling = ty.spelling(); - let layout = ty.fallible_layout(self).ok(); - let location = ty.declaration().location(); - let type_kind = TypeKind::ResolvedTypeRef(wrapped_id); - let ty = Type::new(Some(spelling), layout, type_kind, is_const); - let item = Item::new( - with_id, - None, - None, - parent_id.unwrap_or_else(|| self.current_module.into()), - ItemKind::Type(ty), - Some(location), - ); - self.add_builtin_item(item); - with_id.as_type_id_unchecked() - } - - /// Returns the next item id to be used for an item. 
- pub fn next_item_id(&mut self) -> ItemId { - let ret = ItemId(self.items.len()); - self.items.push(None); - ret - } - - fn build_builtin_ty(&mut self, ty: &clang::Type) -> Option { - use clang_sys::*; - let type_kind = match ty.kind() { - CXType_NullPtr => TypeKind::NullPtr, - CXType_Void => TypeKind::Void, - CXType_Bool => TypeKind::Int(IntKind::Bool), - CXType_Int => TypeKind::Int(IntKind::Int), - CXType_UInt => TypeKind::Int(IntKind::UInt), - CXType_Char_S => TypeKind::Int(IntKind::Char { is_signed: true }), - CXType_Char_U => TypeKind::Int(IntKind::Char { is_signed: false }), - CXType_SChar => TypeKind::Int(IntKind::SChar), - CXType_UChar => TypeKind::Int(IntKind::UChar), - CXType_Short => TypeKind::Int(IntKind::Short), - CXType_UShort => TypeKind::Int(IntKind::UShort), - CXType_WChar => TypeKind::Int(IntKind::WChar), - CXType_Char16 => TypeKind::Int(IntKind::U16), - CXType_Char32 => TypeKind::Int(IntKind::U32), - CXType_Long => TypeKind::Int(IntKind::Long), - CXType_ULong => TypeKind::Int(IntKind::ULong), - CXType_LongLong => TypeKind::Int(IntKind::LongLong), - CXType_ULongLong => TypeKind::Int(IntKind::ULongLong), - CXType_Int128 => TypeKind::Int(IntKind::I128), - CXType_UInt128 => TypeKind::Int(IntKind::U128), - CXType_Float => TypeKind::Float(FloatKind::Float), - CXType_Double => TypeKind::Float(FloatKind::Double), - CXType_LongDouble => TypeKind::Float(FloatKind::LongDouble), - CXType_Float128 => TypeKind::Float(FloatKind::Float128), - CXType_Complex => { - let float_type = - ty.elem_type().expect("Not able to resolve complex type?"); - let float_kind = match float_type.kind() { - CXType_Float => FloatKind::Float, - CXType_Double => FloatKind::Double, - CXType_LongDouble => FloatKind::LongDouble, - CXType_Float128 => FloatKind::Float128, - _ => panic!( - "Non floating-type complex? 
{:?}, {:?}", - ty, float_type, - ), - }; - TypeKind::Complex(float_kind) - } - _ => return None, - }; - - let spelling = ty.spelling(); - let is_const = ty.is_const(); - let layout = ty.fallible_layout(self).ok(); - let location = ty.declaration().location(); - let ty = Type::new(Some(spelling), layout, type_kind, is_const); - let id = self.next_item_id(); - let item = Item::new( - id, - None, - None, - self.root_module.into(), - ItemKind::Type(ty), - Some(location), - ); - self.add_builtin_item(item); - Some(id.as_type_id_unchecked()) - } - - /// Get the current Clang translation unit that is being processed. - pub fn translation_unit(&self) -> &clang::TranslationUnit { - &self.translation_unit - } - - /// Have we parsed the macro named `macro_name` already? - pub fn parsed_macro(&self, macro_name: &[u8]) -> bool { - self.parsed_macros.contains_key(macro_name) - } - - /// Get the currently parsed macros. - pub fn parsed_macros( - &self, - ) -> &StdHashMap, cexpr::expr::EvalResult> { - debug_assert!(!self.in_codegen_phase()); - &self.parsed_macros - } - - /// Mark the macro named `macro_name` as parsed. - pub fn note_parsed_macro( - &mut self, - id: Vec, - value: cexpr::expr::EvalResult, - ) { - self.parsed_macros.insert(id, value); - } - - /// Are we in the codegen phase? - pub fn in_codegen_phase(&self) -> bool { - self.in_codegen - } - - /// Mark the type with the given `name` as replaced by the type with id - /// `potential_ty`. - /// - /// Replacement types are declared using the `replaces="xxx"` annotation, - /// and implies that the original type is hidden. 
- pub fn replace(&mut self, name: &[String], potential_ty: ItemId) { - match self.replacements.entry(name.into()) { - Entry::Vacant(entry) => { - debug!( - "Defining replacement for {:?} as {:?}", - name, potential_ty - ); - entry.insert(potential_ty); - } - Entry::Occupied(occupied) => { - warn!( - "Replacement for {:?} already defined as {:?}; \ - ignoring duplicate replacement definition as {:?}", - name, - occupied.get(), - potential_ty - ); - } - } - } - - /// Has the item with the given `name` and `id` been replaced by another - /// type? - pub fn is_replaced_type>( - &self, - path: &[String], - id: Id, - ) -> bool { - let id = id.into(); - matches!(self.replacements.get(path), Some(replaced_by) if *replaced_by != id) - } - - /// Is the type with the given `name` marked as opaque? - pub fn opaque_by_name(&self, path: &[String]) -> bool { - debug_assert!( - self.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.options.opaque_types.matches(&path[1..].join("::")) - } - - /// Get the options used to configure this bindgen context. - pub(crate) fn options(&self) -> &BindgenOptions { - &self.options - } - - /// Tokenizes a namespace cursor in order to get the name and kind of the - /// namespace. 
- fn tokenize_namespace( - &self, - cursor: &clang::Cursor, - ) -> (Option, ModuleKind) { - assert_eq!( - cursor.kind(), - ::clang_sys::CXCursor_Namespace, - "Be a nice person" - ); - - let mut module_name = None; - let spelling = cursor.spelling(); - if !spelling.is_empty() { - module_name = Some(spelling) - } - - let mut kind = ModuleKind::Normal; - let mut found_namespace_keyword = false; - for token in cursor.tokens().iter() { - match token.spelling() { - b"inline" => { - assert!(!found_namespace_keyword); - assert!(kind != ModuleKind::Inline); - kind = ModuleKind::Inline; - } - // The double colon allows us to handle nested namespaces like - // namespace foo::bar { } - // - // libclang still gives us two namespace cursors, which is cool, - // but the tokenization of the second begins with the double - // colon. That's ok, so we only need to handle the weird - // tokenization here. - // - // Fortunately enough, inline nested namespace specifiers aren't - // a thing, and are invalid C++ :) - b"namespace" | b"::" => { - found_namespace_keyword = true; - } - b"{" => { - assert!(found_namespace_keyword); - break; - } - name if found_namespace_keyword => { - if module_name.is_none() { - module_name = - Some(String::from_utf8_lossy(name).into_owned()); - } - break; - } - spelling if !found_namespace_keyword => { - // This is _likely_, but not certainly, a macro that's been placed just before - // the namespace keyword. Unfortunately, clang tokens don't let us easily see - // through the ifdef tokens, so we don't know what this token should really be. - // Instead of panicking though, we warn the user that we assumed the token was - // blank, and then move on. - // - // See also https://github.com/rust-lang/rust-bindgen/issues/1676. 
- warn!( - "Ignored unknown namespace prefix '{}' at {:?} in {:?}", - String::from_utf8_lossy(spelling), - token, - cursor - ); - } - spelling => { - panic!( - "Unknown token '{}' while processing namespace at {:?} in {:?}", - String::from_utf8_lossy(spelling), - token, - cursor - ); - } - } - } - - (module_name, kind) - } - - /// Given a CXCursor_Namespace cursor, return the item id of the - /// corresponding module, or create one on the fly. - pub fn module(&mut self, cursor: clang::Cursor) -> ModuleId { - use clang_sys::*; - assert_eq!(cursor.kind(), CXCursor_Namespace, "Be a nice person"); - let cursor = cursor.canonical(); - if let Some(id) = self.modules.get(&cursor) { - return *id; - } - - let (module_name, kind) = self.tokenize_namespace(&cursor); - - let module_id = self.next_item_id(); - let module = Module::new(module_name, kind); - let module = Item::new( - module_id, - None, - None, - self.current_module.into(), - ItemKind::Module(module), - Some(cursor.location()), - ); - - let module_id = module.id().as_module_id_unchecked(); - self.modules.insert(cursor, module_id); - - self.add_item(module, None, None); - - module_id - } - - /// Start traversing the module with the given `module_id`, invoke the - /// callback `cb`, and then return to traversing the original module. - pub fn with_module(&mut self, module_id: ModuleId, cb: F) - where - F: FnOnce(&mut Self), - { - debug_assert!(self.resolve_item(module_id).kind().is_module(), "Wat"); - - let previous_id = self.current_module; - self.current_module = module_id; - - cb(self); - - self.current_module = previous_id; - } - - /// Iterate over all (explicitly or transitively) allowlisted items. - /// - /// If no items are explicitly allowlisted, then all items are considered - /// allowlisted. 
- pub fn allowlisted_items(&self) -> &ItemSet { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - - self.allowlisted.as_ref().unwrap() - } - - /// Check whether a particular blocklisted type implements a trait or not. - /// Results may be cached. - pub fn blocklisted_type_implements_trait( - &self, - item: &Item, - derive_trait: DeriveTrait, - ) -> CanDerive { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - - *self - .blocklisted_types_implement_traits - .borrow_mut() - .entry(derive_trait) - .or_default() - .entry(item.id()) - .or_insert_with(|| { - item.expect_type() - .name() - .and_then(|name| match self.options.parse_callbacks { - Some(ref cb) => cb.blocklisted_type_implements_trait( - name, - derive_trait, - ), - // Sized integer types from get mapped to Rust primitive - // types regardless of whether they are blocklisted, so ensure that - // standard traits are considered derivable for them too. - None => match name { - "int8_t" | "uint8_t" | "int16_t" | "uint16_t" | - "int32_t" | "uint32_t" | "int64_t" | - "uint64_t" | "uintptr_t" | "intptr_t" | - "ptrdiff_t" => Some(CanDerive::Yes), - "size_t" if self.options.size_t_is_usize => { - Some(CanDerive::Yes) - } - "ssize_t" if self.options.size_t_is_usize => { - Some(CanDerive::Yes) - } - _ => Some(CanDerive::No), - }, - }) - .unwrap_or(CanDerive::No) - }) - } - - /// Get a reference to the set of items we should generate. - pub fn codegen_items(&self) -> &ItemSet { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - self.codegen_items.as_ref().unwrap() - } - - /// Compute the allowlisted items set and populate `self.allowlisted`. 
- fn compute_allowlisted_and_codegen_items(&mut self) { - assert!(self.in_codegen_phase()); - assert!(self.current_module == self.root_module); - assert!(self.allowlisted.is_none()); - let _t = self.timer("compute_allowlisted_and_codegen_items"); - - let roots = { - let mut roots = self - .items() - // Only consider roots that are enabled for codegen. - .filter(|&(_, item)| item.is_enabled_for_codegen(self)) - .filter(|&(_, item)| { - // If nothing is explicitly allowlisted, then everything is fair - // game. - if self.options().allowlisted_types.is_empty() && - self.options().allowlisted_functions.is_empty() && - self.options().allowlisted_vars.is_empty() - { - return true; - } - - // If this is a type that explicitly replaces another, we assume - // you know what you're doing. - if item.annotations().use_instead_of().is_some() { - return true; - } - - let name = item.path_for_allowlisting(self)[1..].join("::"); - debug!("allowlisted_items: testing {:?}", name); - match *item.kind() { - ItemKind::Module(..) => true, - ItemKind::Function(_) => { - self.options().allowlisted_functions.matches(&name) - } - ItemKind::Var(_) => { - self.options().allowlisted_vars.matches(&name) - } - ItemKind::Type(ref ty) => { - if self.options().allowlisted_types.matches(&name) { - return true; - } - - // Auto-allowlist types that don't need code - // generation if not allowlisting recursively, to - // make the #[derive] analysis not be lame. - if !self.options().allowlist_recursively { - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Function(..) | - TypeKind::ResolvedTypeRef(..) 
| - TypeKind::Opaque | - TypeKind::TypeParam => return true, - _ => {} - }; - } - - // Unnamed top-level enums are special and we - // allowlist them via the `allowlisted_vars` filter, - // since they're effectively top-level constants, - // and there's no way for them to be referenced - // consistently. - let parent = self.resolve_item(item.parent_id()); - if !parent.is_module() { - return false; - } - - let enum_ = match *ty.kind() { - TypeKind::Enum(ref e) => e, - _ => return false, - }; - - if ty.name().is_some() { - return false; - } - - let mut prefix_path = - parent.path_for_allowlisting(self).clone(); - enum_.variants().iter().any(|variant| { - prefix_path.push( - variant.name_for_allowlisting().into(), - ); - let name = prefix_path[1..].join("::"); - prefix_path.pop().unwrap(); - self.options().allowlisted_vars.matches(&name) - }) - } - } - }) - .map(|(id, _)| id) - .collect::>(); - - // The reversal preserves the expected ordering of traversal, - // resulting in more stable-ish bindgen-generated names for - // anonymous types (like unions). - roots.reverse(); - roots - }; - - let allowlisted_items_predicate = - if self.options().allowlist_recursively { - traversal::all_edges - } else { - // Only follow InnerType edges from the allowlisted roots. - // Such inner types (e.g. anonymous structs/unions) are - // always emitted by codegen, and they need to be allowlisted - // to make sure they are processed by e.g. the derive analysis. 
- traversal::only_inner_type_edges - }; - - let allowlisted = AllowlistedItemsTraversal::new( - self, - roots.clone(), - allowlisted_items_predicate, - ) - .collect::(); - - let codegen_items = if self.options().allowlist_recursively { - AllowlistedItemsTraversal::new( - self, - roots, - traversal::codegen_edges, - ) - .collect::() - } else { - allowlisted.clone() - }; - - self.allowlisted = Some(allowlisted); - self.codegen_items = Some(codegen_items); - - for item in self.options().allowlisted_functions.unmatched_items() { - warn!("unused option: --allowlist-function {}", item); - } - - for item in self.options().allowlisted_vars.unmatched_items() { - warn!("unused option: --allowlist-var {}", item); - } - - for item in self.options().allowlisted_types.unmatched_items() { - warn!("unused option: --allowlist-type {}", item); - } - } - - /// Convenient method for getting the prefix to use for most traits in - /// codegen depending on the `use_core` option. - pub fn trait_prefix(&self) -> Ident { - if self.options().use_core { - self.rust_ident_raw("core") - } else { - self.rust_ident_raw("std") - } - } - - /// Call if a bindgen complex is generated - pub fn generated_bindgen_complex(&self) { - self.generated_bindgen_complex.set(true) - } - - /// Whether we need to generate the bindgen complex type - pub fn need_bindgen_complex_type(&self) -> bool { - self.generated_bindgen_complex.get() - } - - /// Compute whether we can derive debug. - fn compute_cannot_derive_debug(&mut self) { - let _t = self.timer("compute_cannot_derive_debug"); - assert!(self.cannot_derive_debug.is_none()); - if self.options.derive_debug { - self.cannot_derive_debug = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Debug, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive debug or not. 
- pub fn lookup_can_derive_debug>(&self, id: Id) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive debug or not. - !self.cannot_derive_debug.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive default. - fn compute_cannot_derive_default(&mut self) { - let _t = self.timer("compute_cannot_derive_default"); - assert!(self.cannot_derive_default.is_none()); - if self.options.derive_default { - self.cannot_derive_default = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Default, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive default or not. - pub fn lookup_can_derive_default>(&self, id: Id) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_default when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive default or not. - !self.cannot_derive_default.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive copy. - fn compute_cannot_derive_copy(&mut self) { - let _t = self.timer("compute_cannot_derive_copy"); - assert!(self.cannot_derive_copy.is_none()); - self.cannot_derive_copy = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Copy, - )))); - } - - /// Compute whether we can derive hash. - fn compute_cannot_derive_hash(&mut self) { - let _t = self.timer("compute_cannot_derive_hash"); - assert!(self.cannot_derive_hash.is_none()); - if self.options.derive_hash { - self.cannot_derive_hash = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Hash, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive hash or not. 
- pub fn lookup_can_derive_hash>(&self, id: Id) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive hash or not. - !self.cannot_derive_hash.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive PartialOrd, PartialEq or Eq. - fn compute_cannot_derive_partialord_partialeq_or_eq(&mut self) { - let _t = self.timer("compute_cannot_derive_partialord_partialeq_or_eq"); - assert!(self.cannot_derive_partialeq_or_partialord.is_none()); - if self.options.derive_partialord || - self.options.derive_partialeq || - self.options.derive_eq - { - self.cannot_derive_partialeq_or_partialord = - Some(analyze::(( - self, - DeriveTrait::PartialEqOrPartialOrd, - ))); - } - } - - /// Look up whether the item with `id` can derive `Partial{Eq,Ord}`. - pub fn lookup_can_derive_partialeq_or_partialord>( - &self, - id: Id, - ) -> CanDerive { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_partialeq_or_partialord when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive partialeq or not. - self.cannot_derive_partialeq_or_partialord - .as_ref() - .unwrap() - .get(&id) - .cloned() - .unwrap_or(CanDerive::Yes) - } - - /// Look up whether the item with `id` can derive `Copy` or not. - pub fn lookup_can_derive_copy>(&self, id: Id) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive `Copy` or not. - let id = id.into(); - - !self.lookup_has_type_param_in_array(id) && - !self.cannot_derive_copy.as_ref().unwrap().contains(&id) - } - - /// Compute whether the type has type parameter in array. 
- fn compute_has_type_param_in_array(&mut self) { - let _t = self.timer("compute_has_type_param_in_array"); - assert!(self.has_type_param_in_array.is_none()); - self.has_type_param_in_array = - Some(analyze::(self)); - } - - /// Look up whether the item with `id` has type parameter in array or not. - pub fn lookup_has_type_param_in_array>( - &self, - id: Id, - ) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute has array when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has - // type parameter in array or not. - self.has_type_param_in_array - .as_ref() - .unwrap() - .contains(&id.into()) - } - - /// Compute whether the type has float. - fn compute_has_float(&mut self) { - let _t = self.timer("compute_has_float"); - assert!(self.has_float.is_none()); - if self.options.derive_eq || self.options.derive_ord { - self.has_float = Some(analyze::(self)); - } - } - - /// Look up whether the item with `id` has array or not. - pub fn lookup_has_float>(&self, id: Id) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute has float when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has - // float or not. - self.has_float.as_ref().unwrap().contains(&id.into()) - } - - /// Check if `--no-partialeq` flag is enabled for this item. - pub fn no_partialeq_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_partialeq_types.matches(&name) - } - - /// Check if `--no-copy` flag is enabled for this item. - pub fn no_copy_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_copy_types.matches(&name) - } - - /// Check if `--no-debug` flag is enabled for this item. 
- pub fn no_debug_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_debug_types.matches(&name) - } - - /// Check if `--no-default` flag is enabled for this item. - pub fn no_default_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_default_types.matches(&name) - } - - /// Check if `--no-hash` flag is enabled for this item. - pub fn no_hash_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_hash_types.matches(&name) - } - - /// Check if `--must-use-type` flag is enabled for this item. - pub fn must_use_type_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().must_use_types.matches(&name) - } -} - -/// A builder struct for configuring item resolution options. -#[derive(Debug, Copy, Clone)] -pub struct ItemResolver { - id: ItemId, - through_type_refs: bool, - through_type_aliases: bool, -} - -impl ItemId { - /// Create an `ItemResolver` from this item id. - pub fn into_resolver(self) -> ItemResolver { - self.into() - } -} - -impl From for ItemResolver -where - T: Into, -{ - fn from(id: T) -> ItemResolver { - ItemResolver::new(id) - } -} - -impl ItemResolver { - /// Construct a new `ItemResolver` from the given id. - pub fn new>(id: Id) -> ItemResolver { - let id = id.into(); - ItemResolver { - id, - through_type_refs: false, - through_type_aliases: false, - } - } - - /// Keep resolving through `Type::TypeRef` items. - pub fn through_type_refs(mut self) -> ItemResolver { - self.through_type_refs = true; - self - } - - /// Keep resolving through `Type::Alias` items. - pub fn through_type_aliases(mut self) -> ItemResolver { - self.through_type_aliases = true; - self - } - - /// Finish configuring and perform the actual item resolution. 
- pub fn resolve(self, ctx: &BindgenContext) -> &Item { - assert!(ctx.collected_typerefs()); - - let mut id = self.id; - let mut seen_ids = HashSet::default(); - loop { - let item = ctx.resolve_item(id); - - // Detect cycles and bail out. These can happen in certain cases - // involving incomplete qualified dependent types (#2085). - if !seen_ids.insert(id) { - return item; - } - - let ty_kind = item.as_type().map(|t| t.kind()); - match ty_kind { - Some(&TypeKind::ResolvedTypeRef(next_id)) - if self.through_type_refs => - { - id = next_id.into(); - } - // We intentionally ignore template aliases here, as they are - // more complicated, and don't represent a simple renaming of - // some type. - Some(&TypeKind::Alias(next_id)) - if self.through_type_aliases => - { - id = next_id.into(); - } - _ => return item, - } - } - } -} - -/// A type that we are in the middle of parsing. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct PartialType { - decl: Cursor, - // Just an ItemId, and not a TypeId, because we haven't finished this type - // yet, so there's still time for things to go wrong. - id: ItemId, -} - -impl PartialType { - /// Construct a new `PartialType`. - pub fn new(decl: Cursor, id: ItemId) -> PartialType { - // assert!(decl == decl.canonical()); - PartialType { decl, id } - } - - /// The cursor pointing to this partial type's declaration location. - pub fn decl(&self) -> &Cursor { - &self.decl - } - - /// The item ID allocated for this type. This is *NOT* a key for an entry in - /// the context's item set yet! - pub fn id(&self) -> ItemId { - self.id - } -} - -impl TemplateParameters for PartialType { - fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { - // Maybe at some point we will eagerly parse named types, but for now we - // don't and this information is unavailable. 
- vec![] - } - - fn num_self_template_params(&self, _ctx: &BindgenContext) -> usize { - // Wouldn't it be nice if libclang would reliably give us this - // information‽ - match self.decl().kind() { - clang_sys::CXCursor_ClassTemplate | - clang_sys::CXCursor_FunctionTemplate | - clang_sys::CXCursor_TypeAliasTemplateDecl => { - let mut num_params = 0; - self.decl().visit(|c| { - match c.kind() { - clang_sys::CXCursor_TemplateTypeParameter | - clang_sys::CXCursor_TemplateTemplateParameter | - clang_sys::CXCursor_NonTypeTemplateParameter => { - num_params += 1; - } - _ => {} - }; - clang_sys::CXChildVisit_Continue - }); - num_params - } - _ => 0, - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/derive.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/derive.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/derive.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/derive.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -//! Traits for determining whether we can derive traits for a thing or not. -//! -//! These traits tend to come in pairs: -//! -//! 1. A "trivial" version, whose implementations aren't allowed to recursively -//! look at other types or the results of fix point analyses. -//! -//! 2. A "normal" version, whose implementations simply query the results of a -//! fix point analysis. -//! -//! The former is used by the analyses when creating the results queried by the -//! second. - -use super::context::BindgenContext; - -use std::cmp; -use std::ops; - -/// A trait that encapsulates the logic for whether or not we can derive `Debug` -/// for a given thing. -pub trait CanDeriveDebug { - /// Return `true` if `Debug` can be derived for this thing, `false` - /// otherwise. 
- fn can_derive_debug(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Copy` -/// for a given thing. -pub trait CanDeriveCopy { - /// Return `true` if `Copy` can be derived for this thing, `false` - /// otherwise. - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `Default` for a given thing. -pub trait CanDeriveDefault { - /// Return `true` if `Default` can be derived for this thing, `false` - /// otherwise. - fn can_derive_default(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Hash` -/// for a given thing. -pub trait CanDeriveHash { - /// Return `true` if `Hash` can be derived for this thing, `false` - /// otherwise. - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `PartialEq` for a given thing. -pub trait CanDerivePartialEq { - /// Return `true` if `PartialEq` can be derived for this thing, `false` - /// otherwise. - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `PartialOrd` for a given thing. -pub trait CanDerivePartialOrd { - /// Return `true` if `PartialOrd` can be derived for this thing, `false` - /// otherwise. - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Eq` -/// for a given thing. -pub trait CanDeriveEq { - /// Return `true` if `Eq` can be derived for this thing, `false` otherwise. - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Ord` -/// for a given thing. 
-pub trait CanDeriveOrd { - /// Return `true` if `Ord` can be derived for this thing, `false` otherwise. - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool; -} - -/// Whether it is possible or not to automatically derive trait for an item. -/// -/// ```ignore -/// No -/// ^ -/// | -/// Manually -/// ^ -/// | -/// Yes -/// ``` -/// -/// Initially we assume that we can derive trait for all types and then -/// update our understanding as we learn more about each type. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub enum CanDerive { - /// Yes, we can derive automatically. - Yes, - - /// The only thing that stops us from automatically deriving is that - /// array with more than maximum number of elements is used. - /// - /// This means we probably can "manually" implement such trait. - Manually, - - /// No, we cannot. - No, -} - -impl Default for CanDerive { - fn default() -> CanDerive { - CanDerive::Yes - } -} - -impl CanDerive { - /// Take the least upper bound of `self` and `rhs`. - pub fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for CanDerive { - type Output = Self; - - fn bitor(self, rhs: Self) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for CanDerive { - fn bitor_assign(&mut self, rhs: Self) { - *self = self.join(rhs) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/dot.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/dot.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/dot.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/dot.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -//! Generating Graphviz `dot` files from our IR. 
- -use super::context::{BindgenContext, ItemId}; -use super::traversal::Trace; -use std::fs::File; -use std::io::{self, Write}; -use std::path::Path; - -/// A trait for anything that can write attributes as `` rows to a dot -/// file. -pub trait DotAttributes { - /// Write this thing's attributes to the given output. Each attribute must - /// be its own `...`. - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write; -} - -/// Write a graphviz dot file containing our IR. -pub fn write_dot_file

(ctx: &BindgenContext, path: P) -> io::Result<()> -where - P: AsRef, -{ - let file = File::create(path)?; - let mut dot_file = io::BufWriter::new(file); - writeln!(&mut dot_file, "digraph {{")?; - - let mut err: Option> = None; - - for (id, item) in ctx.items() { - let is_allowlisted = ctx.allowlisted_items().contains(&id); - - writeln!( - &mut dot_file, - r#"{} [fontname="courier", color={}, label=<

"#, - id.as_usize(), - if is_allowlisted { "black" } else { "gray" } - )?; - item.dot_attributes(ctx, &mut dot_file)?; - writeln!(&mut dot_file, r#"
>];"#)?; - - item.trace( - ctx, - &mut |sub_id: ItemId, edge_kind| { - if err.is_some() { - return; - } - - match writeln!( - &mut dot_file, - "{} -> {} [label={:?}, color={}];", - id.as_usize(), - sub_id.as_usize(), - edge_kind, - if is_allowlisted { "black" } else { "gray" } - ) { - Ok(_) => {} - Err(e) => err = Some(Err(e)), - } - }, - &(), - ); - - if let Some(err) = err { - return err; - } - - if let Some(module) = item.as_module() { - for child in module.children() { - writeln!( - &mut dot_file, - "{} -> {} [style=dotted, color=gray]", - item.id().as_usize(), - child.as_usize() - )?; - } - } - } - - writeln!(&mut dot_file, "}}")?; - Ok(()) -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/enum_ty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/enum_ty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/enum_ty.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/enum_ty.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,305 +0,0 @@ -//! Intermediate representation for C/C++ enumerations. - -use super::super::codegen::EnumVariation; -use super::context::{BindgenContext, TypeId}; -use super::item::Item; -use super::ty::{Type, TypeKind}; -use crate::clang; -use crate::ir::annotations::Annotations; -use crate::parse::{ClangItemParser, ParseError}; -use crate::regex_set::RegexSet; - -/// An enum representing custom handling that can be given to a variant. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum EnumVariantCustomBehavior { - /// This variant will be a module containing constants. - ModuleConstify, - /// This variant will be constified, that is, forced to generate a constant. - Constify, - /// This variant will be hidden entirely from the resulting enum. - Hide, -} - -/// A C/C++ enumeration. -#[derive(Debug)] -pub struct Enum { - /// The representation used for this enum; it should be an `IntKind` type or - /// an alias to one. 
- /// - /// It's `None` if the enum is a forward declaration and isn't defined - /// anywhere else, see `tests/headers/func_ptr_in_struct.h`. - repr: Option, - - /// The different variants, with explicit values. - variants: Vec, -} - -impl Enum { - /// Construct a new `Enum` with the given representation and variants. - pub fn new(repr: Option, variants: Vec) -> Self { - Enum { repr, variants } - } - - /// Get this enumeration's representation. - pub fn repr(&self) -> Option { - self.repr - } - - /// Get this enumeration's variants. - pub fn variants(&self) -> &[EnumVariant] { - &self.variants - } - - /// Construct an enumeration from the given Clang type. - pub fn from_ty( - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - debug!("Enum::from_ty {:?}", ty); - - if ty.kind() != CXType_Enum { - return Err(ParseError::Continue); - } - - let declaration = ty.declaration().canonical(); - let repr = declaration - .enum_type() - .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok()); - let mut variants = vec![]; - - let variant_ty = - repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx)); - let is_bool = variant_ty.map_or(false, Type::is_bool); - - // Assume signedness since the default type by the C standard is an int. - let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() { - TypeKind::Int(ref int_kind) => int_kind.is_signed(), - ref other => { - panic!("Since when enums can be non-integers? 
{:?}", other) - } - }); - - let type_name = ty.spelling(); - let type_name = if type_name.is_empty() { - None - } else { - Some(type_name) - }; - let type_name = type_name.as_deref(); - - let definition = declaration.definition().unwrap_or(declaration); - definition.visit(|cursor| { - if cursor.kind() == CXCursor_EnumConstantDecl { - let value = if is_bool { - cursor.enum_val_boolean().map(EnumVariantValue::Boolean) - } else if is_signed { - cursor.enum_val_signed().map(EnumVariantValue::Signed) - } else { - cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned) - }; - if let Some(val) = value { - let name = cursor.spelling(); - let annotations = Annotations::new(&cursor); - let custom_behavior = ctx - .parse_callbacks() - .and_then(|callbacks| { - callbacks - .enum_variant_behavior(type_name, &name, val) - }) - .or_else(|| { - let annotations = annotations.as_ref()?; - if annotations.hide() { - Some(EnumVariantCustomBehavior::Hide) - } else if annotations.constify_enum_variant() { - Some(EnumVariantCustomBehavior::Constify) - } else { - None - } - }); - - let new_name = ctx - .parse_callbacks() - .and_then(|callbacks| { - callbacks.enum_variant_name(type_name, &name, val) - }) - .or_else(|| { - annotations - .as_ref()? - .use_instead_of()? - .last() - .cloned() - }) - .unwrap_or_else(|| name.clone()); - - let comment = cursor.raw_comment(); - variants.push(EnumVariant::new( - new_name, - name, - comment, - val, - custom_behavior, - )); - } - } - CXChildVisit_Continue - }); - Ok(Enum::new(repr, variants)) - } - - fn is_matching_enum( - &self, - ctx: &BindgenContext, - enums: &RegexSet, - item: &Item, - ) -> bool { - let path = item.path_for_allowlisting(ctx); - let enum_ty = item.expect_type(); - - if enums.matches(&path[1..].join("::")) { - return true; - } - - // Test the variants if the enum is anonymous. 
- if enum_ty.name().is_some() { - return false; - } - - self.variants().iter().any(|v| enums.matches(&v.name())) - } - - /// Returns the final representation of the enum. - pub fn computed_enum_variation( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> EnumVariation { - // ModuleConsts has higher precedence before Rust in order to avoid - // problems with overlapping match patterns. - if self.is_matching_enum( - ctx, - &ctx.options().constified_enum_modules, - item, - ) { - EnumVariation::ModuleConsts - } else if self.is_matching_enum( - ctx, - &ctx.options().bitfield_enums, - item, - ) { - EnumVariation::NewType { is_bitfield: true } - } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item) - { - EnumVariation::NewType { is_bitfield: false } - } else if self.is_matching_enum( - ctx, - &ctx.options().rustified_enums, - item, - ) { - EnumVariation::Rust { - non_exhaustive: false, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().rustified_non_exhaustive_enums, - item, - ) { - EnumVariation::Rust { - non_exhaustive: true, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().constified_enums, - item, - ) { - EnumVariation::Consts - } else { - ctx.options().default_enum_style - } - } -} - -/// A single enum variant, to be contained only in an enum. -#[derive(Debug)] -pub struct EnumVariant { - /// The name of the variant. - name: String, - - /// The original name of the variant (without user mangling) - name_for_allowlisting: String, - - /// An optional doc comment. - comment: Option, - - /// The integer value of the variant. - val: EnumVariantValue, - - /// The custom behavior this variant may have, if any. - custom_behavior: Option, -} - -/// A constant value assigned to an enumeration variant. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum EnumVariantValue { - /// A boolean constant. - Boolean(bool), - - /// A signed constant. - Signed(i64), - - /// An unsigned constant. 
- Unsigned(u64), -} - -impl EnumVariant { - /// Construct a new enumeration variant from the given parts. - pub fn new( - name: String, - name_for_allowlisting: String, - comment: Option, - val: EnumVariantValue, - custom_behavior: Option, - ) -> Self { - EnumVariant { - name, - name_for_allowlisting, - comment, - val, - custom_behavior, - } - } - - /// Get this variant's name. - pub fn name(&self) -> &str { - &self.name - } - - /// Get this variant's name. - pub fn name_for_allowlisting(&self) -> &str { - &self.name_for_allowlisting - } - - /// Get this variant's value. - pub fn val(&self) -> EnumVariantValue { - self.val - } - - /// Get this variant's documentation. - pub fn comment(&self) -> Option<&str> { - self.comment.as_deref() - } - - /// Returns whether this variant should be enforced to be a constant by code - /// generation. - pub fn force_constification(&self) -> bool { - self.custom_behavior - .map_or(false, |b| b == EnumVariantCustomBehavior::Constify) - } - - /// Returns whether the current variant should be hidden completely from the - /// resulting rust enum. - pub fn hidden(&self) -> bool { - self.custom_behavior - .map_or(false, |b| b == EnumVariantCustomBehavior::Hide) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/function.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/function.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/function.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/function.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,652 +0,0 @@ -//! Intermediate representation for C/C++ functions and methods. 
- -use super::comp::MethodKind; -use super::context::{BindgenContext, TypeId}; -use super::dot::DotAttributes; -use super::item::Item; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::TypeKind; -use crate::clang; -use crate::parse::{ - ClangItemParser, ClangSubItemParser, ParseError, ParseResult, -}; -use clang_sys::{self, CXCallingConv}; -use proc_macro2; -use quote; -use quote::TokenStreamExt; -use std::io; - -const RUST_DERIVE_FUNPTR_LIMIT: usize = 12; - -/// What kind of a function are we looking at? -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum FunctionKind { - /// A plain, free function. - Function, - /// A method of some kind. - Method(MethodKind), -} - -impl FunctionKind { - /// Given a clang cursor, return the kind of function it represents, or - /// `None` otherwise. - pub fn from_cursor(cursor: &clang::Cursor) -> Option { - // FIXME(emilio): Deduplicate logic with `ir::comp`. - Some(match cursor.kind() { - clang_sys::CXCursor_FunctionDecl => FunctionKind::Function, - clang_sys::CXCursor_Constructor => { - FunctionKind::Method(MethodKind::Constructor) - } - clang_sys::CXCursor_Destructor => { - FunctionKind::Method(if cursor.method_is_virtual() { - MethodKind::VirtualDestructor { - pure_virtual: cursor.method_is_pure_virtual(), - } - } else { - MethodKind::Destructor - }) - } - clang_sys::CXCursor_CXXMethod => { - if cursor.method_is_virtual() { - FunctionKind::Method(MethodKind::Virtual { - pure_virtual: cursor.method_is_pure_virtual(), - }) - } else if cursor.method_is_static() { - FunctionKind::Method(MethodKind::Static) - } else { - FunctionKind::Method(MethodKind::Normal) - } - } - _ => return None, - }) - } -} - -/// The style of linkage -#[derive(Debug, Clone, Copy)] -pub enum Linkage { - /// Externally visible and can be linked against - External, - /// Not exposed externally. 
'static inline' functions will have this kind of linkage - Internal, -} - -/// A function declaration, with a signature, arguments, and argument names. -/// -/// The argument names vector must be the same length as the ones in the -/// signature. -#[derive(Debug)] -pub struct Function { - /// The name of this function. - name: String, - - /// The mangled name, that is, the symbol. - mangled_name: Option, - - /// The id pointing to the current function signature. - signature: TypeId, - - /// The doc comment on the function, if any. - comment: Option, - - /// The kind of function this is. - kind: FunctionKind, - - /// The linkage of the function. - linkage: Linkage, -} - -impl Function { - /// Construct a new function. - pub fn new( - name: String, - mangled_name: Option, - signature: TypeId, - comment: Option, - kind: FunctionKind, - linkage: Linkage, - ) -> Self { - Function { - name, - mangled_name, - signature, - comment, - kind, - linkage, - } - } - - /// Get this function's name. - pub fn name(&self) -> &str { - &self.name - } - - /// Get this function's name. - pub fn mangled_name(&self) -> Option<&str> { - self.mangled_name.as_deref() - } - - /// Get this function's signature type. - pub fn signature(&self) -> TypeId { - self.signature - } - - /// Get this function's kind. - pub fn kind(&self) -> FunctionKind { - self.kind - } - - /// Get this function's linkage. - pub fn linkage(&self) -> Linkage { - self.linkage - } -} - -impl DotAttributes for Function { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if let Some(ref mangled) = self.mangled_name { - let mangled: String = - mangled.chars().flat_map(|c| c.escape_default()).collect(); - writeln!( - out, - "mangled name{}", - mangled - )?; - } - - Ok(()) - } -} - -/// An ABI extracted from a clang cursor. -#[derive(Debug, Copy, Clone)] -pub enum Abi { - /// The default C ABI. - C, - /// The "stdcall" ABI. 
- Stdcall, - /// The "fastcall" ABI. - Fastcall, - /// The "thiscall" ABI. - ThisCall, - /// The "aapcs" ABI. - Aapcs, - /// The "win64" ABI. - Win64, - /// An unknown or invalid ABI. - Unknown(CXCallingConv), -} - -impl Abi { - /// Returns whether this Abi is known or not. - fn is_unknown(&self) -> bool { - matches!(*self, Abi::Unknown(..)) - } -} - -impl quote::ToTokens for Abi { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - tokens.append_all(match *self { - Abi::C => quote! { "C" }, - Abi::Stdcall => quote! { "stdcall" }, - Abi::Fastcall => quote! { "fastcall" }, - Abi::ThisCall => quote! { "thiscall" }, - Abi::Aapcs => quote! { "aapcs" }, - Abi::Win64 => quote! { "win64" }, - Abi::Unknown(cc) => panic!( - "Cannot turn unknown calling convention to tokens: {:?}", - cc - ), - }); - } -} - -/// A function signature. -#[derive(Debug)] -pub struct FunctionSig { - /// The return type of the function. - return_type: TypeId, - - /// The type of the arguments, optionally with the name of the argument when - /// declared. - argument_types: Vec<(Option, TypeId)>, - - /// Whether this function is variadic. - is_variadic: bool, - - /// Whether this function's return value must be used. - must_use: bool, - - /// The ABI of this function. - abi: Abi, -} - -fn get_abi(cc: CXCallingConv) -> Abi { - use clang_sys::*; - match cc { - CXCallingConv_Default => Abi::C, - CXCallingConv_C => Abi::C, - CXCallingConv_X86StdCall => Abi::Stdcall, - CXCallingConv_X86FastCall => Abi::Fastcall, - CXCallingConv_X86ThisCall => Abi::ThisCall, - CXCallingConv_AAPCS => Abi::Aapcs, - CXCallingConv_X86_64Win64 => Abi::Win64, - other => Abi::Unknown(other), - } -} - -/// Get the mangled name for the cursor's referent. 
-pub fn cursor_mangling( - ctx: &BindgenContext, - cursor: &clang::Cursor, -) -> Option { - if !ctx.options().enable_mangling { - return None; - } - - // We early return here because libclang may crash in some case - // if we pass in a variable inside a partial specialized template. - // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462. - if cursor.is_in_non_fully_specialized_template() { - return None; - } - - let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor; - if let Ok(mut manglings) = cursor.cxx_manglings() { - while let Some(m) = manglings.pop() { - // Only generate the destructor group 1, see below. - if is_destructor && !m.ends_with("D1Ev") { - continue; - } - - return Some(m); - } - } - - let mut mangling = cursor.mangling(); - if mangling.is_empty() { - return None; - } - - if is_destructor { - // With old (3.8-) libclang versions, and the Itanium ABI, clang returns - // the "destructor group 0" symbol, which means that it'll try to free - // memory, which definitely isn't what we want. - // - // Explicitly force the destructor group 1 symbol. - // - // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special - // for the reference, and http://stackoverflow.com/a/6614369/1091587 for - // a more friendly explanation. - // - // We don't need to do this for constructors since clang seems to always - // have returned the C1 constructor. - // - // FIXME(emilio): Can a legit symbol in other ABIs end with this string? - // I don't think so, but if it can this would become a linker error - // anyway, not an invalid free at runtime. - // - // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with - // time. 
- if mangling.ends_with("D0Ev") { - let new_len = mangling.len() - 4; - mangling.truncate(new_len); - mangling.push_str("D1Ev"); - } - } - - Some(mangling) -} - -fn args_from_ty_and_cursor( - ty: &clang::Type, - cursor: &clang::Cursor, - ctx: &mut BindgenContext, -) -> Vec<(Option, TypeId)> { - let cursor_args = cursor.args().unwrap_or_default().into_iter(); - let type_args = ty.args().unwrap_or_default().into_iter(); - - // Argument types can be found in either the cursor or the type, but argument names may only be - // found on the cursor. We often have access to both a type and a cursor for each argument, but - // in some cases we may only have one. - // - // Prefer using the type as the source of truth for the argument's type, but fall back to - // inspecting the cursor (this happens for Objective C interfaces). - // - // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor - // (this happens for function pointer return types). - cursor_args - .map(Some) - .chain(std::iter::repeat(None)) - .zip(type_args.map(Some).chain(std::iter::repeat(None))) - .take_while(|(cur, ty)| cur.is_some() || ty.is_some()) - .map(|(arg_cur, arg_ty)| { - let name = arg_cur.map(|a| a.spelling()).and_then(|name| { - if name.is_empty() { - None - } else { - Some(name) - } - }); - - let cursor = arg_cur.unwrap_or(*cursor); - let ty = arg_ty.unwrap_or_else(|| cursor.cur_type()); - (name, Item::from_ty_or_ref(ty, cursor, None, ctx)) - }) - .collect() -} - -impl FunctionSig { - /// Construct a new function signature. - pub fn new( - return_type: TypeId, - argument_types: Vec<(Option, TypeId)>, - is_variadic: bool, - must_use: bool, - abi: Abi, - ) -> Self { - FunctionSig { - return_type, - argument_types, - is_variadic, - must_use, - abi, - } - } - - /// Construct a new function signature from the given Clang type. 
- pub fn from_ty( - ty: &clang::Type, - cursor: &clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - debug!("FunctionSig::from_ty {:?} {:?}", ty, cursor); - - // Skip function templates - let kind = cursor.kind(); - if kind == CXCursor_FunctionTemplate { - return Err(ParseError::Continue); - } - - let spelling = cursor.spelling(); - - // Don't parse operatorxx functions in C++ - let is_operator = |spelling: &str| { - spelling.starts_with("operator") && - !clang::is_valid_identifier(spelling) - }; - if is_operator(&spelling) { - return Err(ParseError::Continue); - } - - // Constructors of non-type template parameter classes for some reason - // include the template parameter in their name. Just skip them, since - // we don't handle well non-type template parameters anyway. - if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) && - spelling.contains('<') - { - return Err(ParseError::Continue); - } - - let cursor = if cursor.is_valid() { - *cursor - } else { - ty.declaration() - }; - - let mut args = match kind { - CXCursor_FunctionDecl | - CXCursor_Constructor | - CXCursor_CXXMethod | - CXCursor_ObjCInstanceMethodDecl | - CXCursor_ObjCClassMethodDecl => { - args_from_ty_and_cursor(ty, &cursor, ctx) - } - _ => { - // For non-CXCursor_FunctionDecl, visiting the cursor's children - // is the only reliable way to get parameter names. - let mut args = vec![]; - cursor.visit(|c| { - if c.kind() == CXCursor_ParmDecl { - let ty = - Item::from_ty_or_ref(c.cur_type(), c, None, ctx); - let name = c.spelling(); - let name = - if name.is_empty() { None } else { Some(name) }; - args.push((name, ty)); - } - CXChildVisit_Continue - }); - - if args.is_empty() { - // FIXME(emilio): Sometimes libclang doesn't expose the - // right AST for functions tagged as stdcall and such... 
- // - // https://bugs.llvm.org/show_bug.cgi?id=45919 - args_from_ty_and_cursor(ty, &cursor, ctx) - } else { - args - } - } - }; - - let must_use = ctx.options().enable_function_attribute_detection && - cursor.has_warn_unused_result_attr(); - let is_method = kind == CXCursor_CXXMethod; - let is_constructor = kind == CXCursor_Constructor; - let is_destructor = kind == CXCursor_Destructor; - if (is_constructor || is_destructor || is_method) && - cursor.lexical_parent() != cursor.semantic_parent() - { - // Only parse constructors once. - return Err(ParseError::Continue); - } - - if is_method || is_constructor || is_destructor { - let is_const = is_method && cursor.method_is_const(); - let is_virtual = is_method && cursor.method_is_virtual(); - let is_static = is_method && cursor.method_is_static(); - if !is_static && !is_virtual { - let parent = cursor.semantic_parent(); - let class = Item::parse(parent, None, ctx) - .expect("Expected to parse the class"); - // The `class` most likely is not finished parsing yet, so use - // the unchecked variant. - let class = class.as_type_id_unchecked(); - - let class = if is_const { - let const_class_id = ctx.next_item_id(); - ctx.build_const_wrapper( - const_class_id, - class, - None, - &parent.cur_type(), - ) - } else { - class - }; - - let ptr = - Item::builtin_type(TypeKind::Pointer(class), false, ctx); - args.insert(0, (Some("this".into()), ptr)); - } else if is_virtual { - let void = Item::builtin_type(TypeKind::Void, false, ctx); - let ptr = - Item::builtin_type(TypeKind::Pointer(void), false, ctx); - args.insert(0, (Some("this".into()), ptr)); - } - } - - let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl || - kind == CXCursor_ObjCClassMethodDecl - { - ty.ret_type() - .or_else(|| cursor.ret_type()) - .ok_or(ParseError::Continue)? - } else { - ty.ret_type().ok_or(ParseError::Continue)? 
- }; - - let ret = if is_constructor && ctx.is_target_wasm32() { - // Constructors in Clang wasm32 target return a pointer to the object - // being constructed. - let void = Item::builtin_type(TypeKind::Void, false, ctx); - Item::builtin_type(TypeKind::Pointer(void), false, ctx) - } else { - Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx) - }; - - // Clang plays with us at "find the calling convention", see #549 and - // co. This seems to be a better fix than that commit. - let mut call_conv = ty.call_conv(); - if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() { - let cursor_call_conv = ty.call_conv(); - if cursor_call_conv != CXCallingConv_Invalid { - call_conv = cursor_call_conv; - } - } - let abi = get_abi(call_conv); - - if abi.is_unknown() { - warn!("Unknown calling convention: {:?}", call_conv); - } - - Ok(Self::new(ret, args, ty.is_variadic(), must_use, abi)) - } - - /// Get this function signature's return type. - pub fn return_type(&self) -> TypeId { - self.return_type - } - - /// Get this function signature's argument (name, type) pairs. - pub fn argument_types(&self) -> &[(Option, TypeId)] { - &self.argument_types - } - - /// Get this function signature's ABI. - pub fn abi(&self) -> Abi { - self.abi - } - - /// Is this function signature variadic? - pub fn is_variadic(&self) -> bool { - // Clang reports some functions as variadic when they *might* be - // variadic. We do the argument check because rust doesn't codegen well - // variadic functions without an initial argument. - self.is_variadic && !self.argument_types.is_empty() - } - - /// Must this function's return value be used? - pub fn must_use(&self) -> bool { - self.must_use - } - - /// Are function pointers with this signature able to derive Rust traits? - /// Rust only supports deriving traits for function pointers with a limited - /// number of parameters and a couple ABIs. 
- /// - /// For more details, see: - /// - /// * https://github.com/rust-lang/rust-bindgen/issues/547, - /// * https://github.com/rust-lang/rust/issues/38848, - /// * and https://github.com/rust-lang/rust/issues/40158 - pub fn function_pointers_can_derive(&self) -> bool { - if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT { - return false; - } - - matches!(self.abi, Abi::C | Abi::Unknown(..)) - } -} - -impl ClangSubItemParser for Function { - fn parse( - cursor: clang::Cursor, - context: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - - let kind = match FunctionKind::from_cursor(&cursor) { - None => return Err(ParseError::Continue), - Some(k) => k, - }; - - debug!("Function::parse({:?}, {:?})", cursor, cursor.cur_type()); - - let visibility = cursor.visibility(); - if visibility != CXVisibility_Default { - return Err(ParseError::Continue); - } - - if cursor.access_specifier() == CX_CXXPrivate { - return Err(ParseError::Continue); - } - - if cursor.is_inlined_function() { - if !context.options().generate_inline_functions { - return Err(ParseError::Continue); - } - if cursor.is_deleted_function() { - return Err(ParseError::Continue); - } - } - - let linkage = cursor.linkage(); - let linkage = match linkage { - CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External, - CXLinkage_Internal => Linkage::Internal, - _ => return Err(ParseError::Continue), - }; - - // Grab the signature using Item::from_ty. - let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?; - - let mut name = cursor.spelling(); - assert!(!name.is_empty(), "Empty function name?"); - - if cursor.kind() == CXCursor_Destructor { - // Remove the leading `~`. The alternative to this is special-casing - // code-generation for destructor functions, which seems less than - // ideal. - if name.starts_with('~') { - name.remove(0); - } - - // Add a suffix to avoid colliding with constructors. 
This would be - // technically fine (since we handle duplicated functions/methods), - // but seems easy enough to handle it here. - name.push_str("_destructor"); - } - - let mangled_name = cursor_mangling(context, &cursor); - let comment = cursor.raw_comment(); - - let function = - Self::new(name, mangled_name, sig, comment, kind, linkage); - Ok(ParseResult::New(function, Some(cursor))) - } -} - -impl Trace for FunctionSig { - type Extra = (); - - fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn); - - for &(_, ty) in self.argument_types() { - tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/int.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/int.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/int.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/int.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,127 +0,0 @@ -//! Intermediate representation for integral types. - -/// Which integral type are we dealing with? -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum IntKind { - /// A `bool`. - Bool, - - /// A `signed char`. - SChar, - - /// An `unsigned char`. - UChar, - - /// An `wchar_t`. - WChar, - - /// A platform-dependent `char` type, with the signedness support. - Char { - /// Whether the char is signed for the target platform. - is_signed: bool, - }, - - /// A `short`. - Short, - - /// An `unsigned short`. - UShort, - - /// An `int`. - Int, - - /// An `unsigned int`. - UInt, - - /// A `long`. - Long, - - /// An `unsigned long`. - ULong, - - /// A `long long`. - LongLong, - - /// An `unsigned long long`. - ULongLong, - - /// A 8-bit signed integer. - I8, - - /// A 8-bit unsigned integer. - U8, - - /// A 16-bit signed integer. 
- I16, - - /// Either a `char16_t` or a `wchar_t`. - U16, - - /// A 32-bit signed integer. - I32, - - /// A 32-bit unsigned integer. - U32, - - /// A 64-bit signed integer. - I64, - - /// A 64-bit unsigned integer. - U64, - - /// An `int128_t` - I128, - - /// A `uint128_t`. - U128, - - /// A custom integer type, used to allow custom macro types depending on - /// range. - Custom { - /// The name of the type, which would be used without modification. - name: &'static str, - /// Whether the type is signed or not. - is_signed: bool, - }, -} - -impl IntKind { - /// Is this integral type signed? - pub fn is_signed(&self) -> bool { - use self::IntKind::*; - match *self { - // TODO(emilio): wchar_t can in theory be signed, but we have no way - // to know whether it is or not right now (unlike char, there's no - // WChar_S / WChar_U). - Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 | - WChar | U32 | U64 | U128 => false, - - SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 | - I128 => true, - - Char { is_signed } => is_signed, - - Custom { is_signed, .. } => is_signed, - } - } - - /// If this type has a known size, return it (in bytes). This is to - /// alleviate libclang sometimes not giving us a layout (like in the case - /// when an enum is defined inside a class with template parameters). - pub fn known_size(&self) -> Option { - use self::IntKind::*; - Some(match *self { - Bool | UChar | SChar | U8 | I8 | Char { .. } => 1, - U16 | I16 => 2, - U32 | I32 => 4, - U64 | I64 => 8, - I128 | U128 => 16, - _ => return None, - }) - } - - /// Whether this type's signedness matches the value. 
- pub fn signedness_matches(&self, val: i64) -> bool { - val >= 0 || self.is_signed() - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item_kind.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item_kind.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item_kind.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item_kind.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -//! Different variants of an `Item` in our intermediate representation. - -use super::context::BindgenContext; -use super::dot::DotAttributes; -use super::function::Function; -use super::module::Module; -use super::ty::Type; -use super::var::Var; -use std::io; - -/// A item we parse and translate. -#[derive(Debug)] -pub enum ItemKind { - /// A module, created implicitly once (the root module), or via C++ - /// namespaces. - Module(Module), - - /// A type declared in any of the multiple ways it can be declared. - Type(Type), - - /// A function or method declaration. - Function(Function), - - /// A variable declaration, most likely a static. - Var(Var), -} - -impl ItemKind { - /// Get a reference to this `ItemKind`'s underying `Module`, or `None` if it - /// is some other kind. - pub fn as_module(&self) -> Option<&Module> { - match *self { - ItemKind::Module(ref module) => Some(module), - _ => None, - } - } - - /// Transform our `ItemKind` into a string. - pub fn kind_name(&self) -> &'static str { - match *self { - ItemKind::Module(..) => "Module", - ItemKind::Type(..) => "Type", - ItemKind::Function(..) => "Function", - ItemKind::Var(..) => "Var", - } - } - - /// Is this a module? - pub fn is_module(&self) -> bool { - self.as_module().is_some() - } - - /// Get a reference to this `ItemKind`'s underying `Module`, or panic if it - /// is some other kind. 
- pub fn expect_module(&self) -> &Module { - self.as_module().expect("Not a module") - } - - /// Get a reference to this `ItemKind`'s underying `Function`, or `None` if - /// it is some other kind. - pub fn as_function(&self) -> Option<&Function> { - match *self { - ItemKind::Function(ref func) => Some(func), - _ => None, - } - } - - /// Is this a function? - pub fn is_function(&self) -> bool { - self.as_function().is_some() - } - - /// Get a reference to this `ItemKind`'s underying `Function`, or panic if - /// it is some other kind. - pub fn expect_function(&self) -> &Function { - self.as_function().expect("Not a function") - } - - /// Get a reference to this `ItemKind`'s underying `Type`, or `None` if - /// it is some other kind. - pub fn as_type(&self) -> Option<&Type> { - match *self { - ItemKind::Type(ref ty) => Some(ty), - _ => None, - } - } - - /// Get a mutable reference to this `ItemKind`'s underying `Type`, or `None` - /// if it is some other kind. - pub fn as_type_mut(&mut self) -> Option<&mut Type> { - match *self { - ItemKind::Type(ref mut ty) => Some(ty), - _ => None, - } - } - - /// Is this a type? - pub fn is_type(&self) -> bool { - self.as_type().is_some() - } - - /// Get a reference to this `ItemKind`'s underying `Type`, or panic if it is - /// some other kind. - pub fn expect_type(&self) -> &Type { - self.as_type().expect("Not a type") - } - - /// Get a reference to this `ItemKind`'s underying `Var`, or `None` if it is - /// some other kind. - pub fn as_var(&self) -> Option<&Var> { - match *self { - ItemKind::Var(ref v) => Some(v), - _ => None, - } - } - - /// Is this a variable? - pub fn is_var(&self) -> bool { - self.as_var().is_some() - } - - /// Get a reference to this `ItemKind`'s underying `Var`, or panic if it is - /// some other kind. 
- pub fn expect_var(&self) -> &Var { - self.as_var().expect("Not a var") - } -} - -impl DotAttributes for ItemKind { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "kind{}", self.kind_name())?; - - match *self { - ItemKind::Module(ref module) => module.dot_attributes(ctx, out), - ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out), - ItemKind::Function(ref func) => func.dot_attributes(ctx, out), - ItemKind::Var(ref var) => var.dot_attributes(ctx, out), - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/item.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2008 +0,0 @@ -//! Bindgen's core intermediate representation type. - -use super::super::codegen::{EnumVariation, CONSTIFIED_ENUM_MODULE_REPR_NAME}; -use super::analysis::{HasVtable, HasVtableResult, Sizedness, SizednessResult}; -use super::annotations::Annotations; -use super::comment; -use super::comp::{CompKind, MethodKind}; -use super::context::{BindgenContext, ItemId, PartialType, TypeId}; -use super::derive::{ - CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use super::dot::DotAttributes; -use super::function::{Function, FunctionKind}; -use super::item_kind::ItemKind; -use super::layout::Opaque; -use super::module::Module; -use super::template::{AsTemplateParam, TemplateParameters}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::{Type, TypeKind}; -use crate::clang; -use crate::parse::{ - ClangItemParser, ClangSubItemParser, ParseError, ParseResult, -}; -use clang_sys; -use lazycell::LazyCell; -use regex; -use std::cell::Cell; -use 
std::collections::BTreeSet; -use std::fmt::Write; -use std::io; -use std::iter; - -/// A trait to get the canonical name from an item. -/// -/// This is the trait that will eventually isolate all the logic related to name -/// mangling and that kind of stuff. -/// -/// This assumes no nested paths, at some point I'll have to make it a more -/// complex thing. -/// -/// This name is required to be safe for Rust, that is, is not expected to -/// return any rust keyword from here. -pub trait ItemCanonicalName { - /// Get the canonical name for this item. - fn canonical_name(&self, ctx: &BindgenContext) -> String; -} - -/// The same, but specifies the path that needs to be followed to reach an item. -/// -/// To contrast with canonical_name, here's an example: -/// -/// ```c++ -/// namespace foo { -/// const BAR = 3; -/// } -/// ``` -/// -/// For bar, the canonical path is `vec!["foo", "BAR"]`, while the canonical -/// name is just `"BAR"`. -pub trait ItemCanonicalPath { - /// Get the namespace-aware canonical path for this item. This means that if - /// namespaces are disabled, you'll get a single item, and otherwise you get - /// the whole path. - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec; - - /// Get the canonical path for this item. - fn canonical_path(&self, ctx: &BindgenContext) -> Vec; -} - -/// A trait for determining if some IR thing is opaque or not. -pub trait IsOpaque { - /// Extra context the IR thing needs to determine if it is opaque or not. - type Extra; - - /// Returns `true` if the thing is opaque, and `false` otherwise. - /// - /// May only be called when `ctx` is in the codegen phase. - fn is_opaque(&self, ctx: &BindgenContext, extra: &Self::Extra) -> bool; -} - -/// A trait for determining if some IR thing has type parameter in array or not. -pub trait HasTypeParamInArray { - /// Returns `true` if the thing has Array, and `false` otherwise. 
- fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait for determining if some IR thing has float or not. -pub trait HasFloat { - /// Returns `true` if the thing has float, and `false` otherwise. - fn has_float(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait for iterating over an item and its parents and up its ancestor chain -/// up to (but not including) the implicit root module. -pub trait ItemAncestors { - /// Get an iterable over this item's ancestors. - fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a>; -} - -#[cfg(testing_only_extra_assertions)] -type DebugOnlyItemSet = ItemSet; - -#[cfg(not(testing_only_extra_assertions))] -struct DebugOnlyItemSet; - -#[cfg(not(testing_only_extra_assertions))] -impl DebugOnlyItemSet { - fn new() -> Self { - DebugOnlyItemSet - } - - fn contains(&self, _id: &ItemId) -> bool { - false - } - - fn insert(&mut self, _id: ItemId) {} -} - -/// An iterator over an item and its ancestors. -pub struct ItemAncestorsIter<'a> { - item: ItemId, - ctx: &'a BindgenContext, - seen: DebugOnlyItemSet, -} - -impl<'a> ItemAncestorsIter<'a> { - fn new>(ctx: &'a BindgenContext, id: Id) -> Self { - ItemAncestorsIter { - item: id.into(), - ctx, - seen: DebugOnlyItemSet::new(), - } - } -} - -impl<'a> Iterator for ItemAncestorsIter<'a> { - type Item = ItemId; - - fn next(&mut self) -> Option { - let item = self.ctx.resolve_item(self.item); - - if item.parent_id() == self.item { - None - } else { - self.item = item.parent_id(); - - extra_assert!(!self.seen.contains(&item.id())); - self.seen.insert(item.id()); - - Some(item.id()) - } - } -} - -impl AsTemplateParam for T -where - T: Copy + Into, -{ - type Extra = (); - - fn as_template_param( - &self, - ctx: &BindgenContext, - _: &(), - ) -> Option { - ctx.resolve_item((*self).into()).as_template_param(ctx, &()) - } -} - -impl AsTemplateParam for Item { - type Extra = (); - - fn as_template_param( - &self, - ctx: &BindgenContext, - _: &(), - ) 
-> Option { - self.kind.as_template_param(ctx, self) - } -} - -impl AsTemplateParam for ItemKind { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - match *self { - ItemKind::Type(ref ty) => ty.as_template_param(ctx, item), - ItemKind::Module(..) | - ItemKind::Function(..) | - ItemKind::Var(..) => None, - } - } -} - -impl ItemCanonicalName for T -where - T: Copy + Into, -{ - fn canonical_name(&self, ctx: &BindgenContext) -> String { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).canonical_name(ctx) - } -} - -impl ItemCanonicalPath for T -where - T: Copy + Into, -{ - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).namespace_aware_canonical_path(ctx) - } - - fn canonical_path(&self, ctx: &BindgenContext) -> Vec { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).canonical_path(ctx) - } -} - -impl ItemAncestors for T -where - T: Copy + Into, -{ - fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { - ItemAncestorsIter::new(ctx, *self) - } -} - -impl ItemAncestors for Item { - fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { - self.id().ancestors(ctx) - } -} - -impl Trace for Id -where - Id: Copy + Into, -{ - type Extra = (); - - fn trace(&self, ctx: &BindgenContext, tracer: &mut T, extra: &()) - where - T: Tracer, - { - ctx.resolve_item(*self).trace(ctx, tracer, extra); - } -} - -impl Trace for Item { - type Extra = (); - - fn trace(&self, ctx: &BindgenContext, tracer: &mut T, _extra: &()) - where - T: Tracer, - { - // Even if this item is blocklisted/hidden, we want to trace it. It is - // traversal iterators' consumers' responsibility to filter items as - // needed. 
Generally, this filtering happens in the implementation of - // `Iterator` for `allowlistedItems`. Fully tracing blocklisted items is - // necessary for things like the template parameter usage analysis to - // function correctly. - - match *self.kind() { - ItemKind::Type(ref ty) => { - // There are some types, like resolved type references, where we - // don't want to stop collecting types even though they may be - // opaque. - if ty.should_be_traced_unconditionally() || - !self.is_opaque(ctx, &()) - { - ty.trace(ctx, tracer, self); - } - } - ItemKind::Function(ref fun) => { - // Just the same way, it has not real meaning for a function to - // be opaque, so we trace across it. - tracer.visit(fun.signature().into()); - } - ItemKind::Var(ref var) => { - tracer.visit_kind(var.ty().into(), EdgeKind::VarType); - } - ItemKind::Module(_) => { - // Module -> children edges are "weak", and we do not want to - // trace them. If we did, then allowlisting wouldn't work as - // expected: everything in every module would end up - // allowlisted. - // - // TODO: make a new edge kind for module -> children edges and - // filter them during allowlisting traversals. 
- } - } - } -} - -impl CanDeriveDebug for Item { - fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_debug(ctx) - } -} - -impl CanDeriveDefault for Item { - fn can_derive_default(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_default(ctx) - } -} - -impl CanDeriveCopy for Item { - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_copy(ctx) - } -} - -impl CanDeriveHash for Item { - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_hash(ctx) - } -} - -impl CanDerivePartialOrd for Item { - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_partialord(ctx) - } -} - -impl CanDerivePartialEq for Item { - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_partialeq(ctx) - } -} - -impl CanDeriveEq for Item { - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_eq(ctx) - } -} - -impl CanDeriveOrd for Item { - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_ord(ctx) - } -} - -/// An item is the base of the bindgen representation, it can be either a -/// module, a type, a function, or a variable (see `ItemKind` for more -/// information). -/// -/// Items refer to each other by `ItemId`. Every item has its parent's -/// id. Depending on the kind of item this is, it may also refer to other items, -/// such as a compound type item referring to other types. Collectively, these -/// references form a graph. -/// -/// The entry-point to this graph is the "root module": a meta-item used to hold -/// all top-level items. -/// -/// An item may have a comment, and annotations (see the `annotations` module). -/// -/// Note that even though we parse all the types of annotations in comments, not -/// all of them apply to every item. Those rules are described in the -/// `annotations` module. -#[derive(Debug)] -pub struct Item { - /// This item's id. 
- id: ItemId, - - /// The item's local id, unique only amongst its siblings. Only used for - /// anonymous items. - /// - /// Lazily initialized in local_id(). - /// - /// Note that only structs, unions, and enums get a local type id. In any - /// case this is an implementation detail. - local_id: LazyCell, - - /// The next local id to use for a child or template instantiation. - next_child_local_id: Cell, - - /// A cached copy of the canonical name, as returned by `canonical_name`. - /// - /// This is a fairly used operation during codegen so this makes bindgen - /// considerably faster in those cases. - canonical_name: LazyCell, - - /// The path to use for allowlisting and other name-based checks, as - /// returned by `path_for_allowlisting`, lazily constructed. - path_for_allowlisting: LazyCell>, - - /// A doc comment over the item, if any. - comment: Option, - /// Annotations extracted from the doc comment, or the default ones - /// otherwise. - annotations: Annotations, - /// An item's parent id. This will most likely be a class where this item - /// was declared, or a module, etc. - /// - /// All the items have a parent, except the root module, in which case the - /// parent id is its own id. - parent_id: ItemId, - /// The item kind. - kind: ItemKind, - /// The source location of the item. - location: Option, -} - -impl AsRef for Item { - fn as_ref(&self) -> &ItemId { - &self.id - } -} - -impl Item { - /// Construct a new `Item`. - pub fn new( - id: ItemId, - comment: Option, - annotations: Option, - parent_id: ItemId, - kind: ItemKind, - location: Option, - ) -> Self { - debug_assert!(id != parent_id || kind.is_module()); - Item { - id, - local_id: LazyCell::new(), - next_child_local_id: Cell::new(1), - canonical_name: LazyCell::new(), - path_for_allowlisting: LazyCell::new(), - parent_id, - comment, - annotations: annotations.unwrap_or_default(), - kind, - location, - } - } - - /// Construct a new opaque item type. 
- pub fn new_opaque_type( - with_id: ItemId, - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> TypeId { - let location = ty.declaration().location(); - let ty = Opaque::from_clang_ty(ty, ctx); - let kind = ItemKind::Type(ty); - let parent = ctx.root_module().into(); - ctx.add_item( - Item::new(with_id, None, None, parent, kind, Some(location)), - None, - None, - ); - with_id.as_type_id_unchecked() - } - - /// Get this `Item`'s identifier. - pub fn id(&self) -> ItemId { - self.id - } - - /// Get this `Item`'s parent's identifier. - /// - /// For the root module, the parent's ID is its own ID. - pub fn parent_id(&self) -> ItemId { - self.parent_id - } - - /// Set this item's parent id. - /// - /// This is only used so replacements get generated in the proper module. - pub fn set_parent_for_replacement>(&mut self, id: Id) { - self.parent_id = id.into(); - } - - /// Returns the depth this item is indented to. - /// - /// FIXME(emilio): This may need fixes for the enums within modules stuff. - pub fn codegen_depth(&self, ctx: &BindgenContext) -> usize { - if !ctx.options().enable_cxx_namespaces { - return 0; - } - - self.ancestors(ctx) - .filter(|id| { - ctx.resolve_item(*id).as_module().map_or(false, |module| { - !module.is_inline() || - ctx.options().conservative_inline_namespaces - }) - }) - .count() + - 1 - } - - /// Get this `Item`'s comment, if it has any, already preprocessed and with - /// the right indentation. - pub fn comment(&self, ctx: &BindgenContext) -> Option { - if !ctx.options().generate_comments { - return None; - } - - self.comment.as_ref().map(|comment| { - comment::preprocess(comment, self.codegen_depth(ctx)) - }) - } - - /// What kind of item is this? - pub fn kind(&self) -> &ItemKind { - &self.kind - } - - /// Get a mutable reference to this item's kind. - pub fn kind_mut(&mut self) -> &mut ItemKind { - &mut self.kind - } - - /// Get an identifier that differentiates this item from its siblings. 
- /// - /// This should stay relatively stable in the face of code motion outside or - /// below this item's lexical scope, meaning that this can be useful for - /// generating relatively stable identifiers within a scope. - pub fn local_id(&self, ctx: &BindgenContext) -> usize { - *self.local_id.borrow_with(|| { - let parent = ctx.resolve_item(self.parent_id); - parent.next_child_local_id() - }) - } - - /// Get an identifier that differentiates a child of this item of other - /// related items. - /// - /// This is currently used for anonymous items, and template instantiation - /// tests, in both cases in order to reduce noise when system headers are at - /// place. - pub fn next_child_local_id(&self) -> usize { - let local_id = self.next_child_local_id.get(); - self.next_child_local_id.set(local_id + 1); - local_id - } - - /// Returns whether this item is a top-level item, from the point of view of - /// bindgen. - /// - /// This point of view changes depending on whether namespaces are enabled - /// or not. That way, in the following example: - /// - /// ```c++ - /// namespace foo { - /// static int var; - /// } - /// ``` - /// - /// `var` would be a toplevel item if namespaces are disabled, but won't if - /// they aren't. - /// - /// This function is used to determine when the codegen phase should call - /// `codegen` on an item, since any item that is not top-level will be - /// generated by its parent. - pub fn is_toplevel(&self, ctx: &BindgenContext) -> bool { - // FIXME: Workaround for some types falling behind when parsing weird - // stl classes, for example. 
- if ctx.options().enable_cxx_namespaces && - self.kind().is_module() && - self.id() != ctx.root_module() - { - return false; - } - - let mut parent = self.parent_id; - loop { - let parent_item = match ctx.resolve_item_fallible(parent) { - Some(item) => item, - None => return false, - }; - - if parent_item.id() == ctx.root_module() { - return true; - } else if ctx.options().enable_cxx_namespaces || - !parent_item.kind().is_module() - { - return false; - } - - parent = parent_item.parent_id(); - } - } - - /// Get a reference to this item's underlying `Type`. Panic if this is some - /// other kind of item. - pub fn expect_type(&self) -> &Type { - self.kind().expect_type() - } - - /// Get a reference to this item's underlying `Type`, or `None` if this is - /// some other kind of item. - pub fn as_type(&self) -> Option<&Type> { - self.kind().as_type() - } - - /// Get a reference to this item's underlying `Function`. Panic if this is - /// some other kind of item. - pub fn expect_function(&self) -> &Function { - self.kind().expect_function() - } - - /// Is this item a module? - pub fn is_module(&self) -> bool { - matches!(self.kind, ItemKind::Module(..)) - } - - /// Get this item's annotations. - pub fn annotations(&self) -> &Annotations { - &self.annotations - } - - /// Whether this item should be blocklisted. - /// - /// This may be due to either annotations or to other kind of configuration. 
- pub fn is_blocklisted(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - if self.annotations.hide() { - return true; - } - - if !ctx.options().blocklisted_files.is_empty() { - if let Some(location) = &self.location { - let (file, _, _, _) = location.location(); - if let Some(filename) = file.name() { - if ctx.options().blocklisted_files.matches(&filename) { - return true; - } - } - } - } - - let path = self.path_for_allowlisting(ctx); - let name = path[1..].join("::"); - ctx.options().blocklisted_items.matches(&name) || - match self.kind { - ItemKind::Type(..) => { - ctx.options().blocklisted_types.matches(&name) || - ctx.is_replaced_type(path, self.id) - } - ItemKind::Function(..) => { - ctx.options().blocklisted_functions.matches(&name) - } - // TODO: Add constant / namespace blocklisting? - ItemKind::Var(..) | ItemKind::Module(..) => false, - } - } - - /// Is this a reference to another type? - pub fn is_type_ref(&self) -> bool { - self.as_type().map_or(false, |ty| ty.is_type_ref()) - } - - /// Is this item a var type? - pub fn is_var(&self) -> bool { - matches!(*self.kind(), ItemKind::Var(..)) - } - - /// Take out item NameOptions - pub fn name<'a>(&'a self, ctx: &'a BindgenContext) -> NameOptions<'a> { - NameOptions::new(self, ctx) - } - - /// Get the target item id for name generation. 
- fn name_target(&self, ctx: &BindgenContext) -> ItemId { - let mut targets_seen = DebugOnlyItemSet::new(); - let mut item = self; - - loop { - extra_assert!(!targets_seen.contains(&item.id())); - targets_seen.insert(item.id()); - - if self.annotations().use_instead_of().is_some() { - return self.id(); - } - - match *item.kind() { - ItemKind::Type(ref ty) => match *ty.kind() { - TypeKind::ResolvedTypeRef(inner) => { - item = ctx.resolve_item(inner); - } - TypeKind::TemplateInstantiation(ref inst) => { - item = ctx.resolve_item(inst.template_definition()); - } - _ => return item.id(), - }, - _ => return item.id(), - } - } - } - - /// Create a fully disambiguated name for an item, including template - /// parameters if it is a type - pub fn full_disambiguated_name(&self, ctx: &BindgenContext) -> String { - let mut s = String::new(); - let level = 0; - self.push_disambiguated_name(ctx, &mut s, level); - s - } - - /// Helper function for full_disambiguated_name - fn push_disambiguated_name( - &self, - ctx: &BindgenContext, - to: &mut String, - level: u8, - ) { - to.push_str(&self.canonical_name(ctx)); - if let ItemKind::Type(ref ty) = *self.kind() { - if let TypeKind::TemplateInstantiation(ref inst) = *ty.kind() { - to.push_str(&format!("_open{}_", level)); - for arg in inst.template_arguments() { - arg.into_resolver() - .through_type_refs() - .resolve(ctx) - .push_disambiguated_name(ctx, to, level + 1); - to.push('_'); - } - to.push_str(&format!("close{}", level)); - } - } - } - - /// Get this function item's name, or `None` if this item is not a function. - fn func_name(&self) -> Option<&str> { - match *self.kind() { - ItemKind::Function(ref func) => Some(func.name()), - _ => None, - } - } - - /// Get the overload index for this method. If this is not a method, return - /// `None`. 
- fn overload_index(&self, ctx: &BindgenContext) -> Option { - self.func_name().and_then(|func_name| { - let parent = ctx.resolve_item(self.parent_id()); - if let ItemKind::Type(ref ty) = *parent.kind() { - if let TypeKind::Comp(ref ci) = *ty.kind() { - // All the constructors have the same name, so no need to - // resolve and check. - return ci - .constructors() - .iter() - .position(|c| *c == self.id()) - .or_else(|| { - ci.methods() - .iter() - .filter(|m| { - let item = ctx.resolve_item(m.signature()); - let func = item.expect_function(); - func.name() == func_name - }) - .position(|m| m.signature() == self.id()) - }); - } - } - - None - }) - } - - /// Get this item's base name (aka non-namespaced name). - fn base_name(&self, ctx: &BindgenContext) -> String { - if let Some(path) = self.annotations().use_instead_of() { - return path.last().unwrap().clone(); - } - - match *self.kind() { - ItemKind::Var(ref var) => var.name().to_owned(), - ItemKind::Module(ref module) => { - module.name().map(ToOwned::to_owned).unwrap_or_else(|| { - format!("_bindgen_mod_{}", self.exposed_id(ctx)) - }) - } - ItemKind::Type(ref ty) => { - ty.sanitized_name(ctx).map(Into::into).unwrap_or_else(|| { - format!("_bindgen_ty_{}", self.exposed_id(ctx)) - }) - } - ItemKind::Function(ref fun) => { - let mut name = fun.name().to_owned(); - - if let Some(idx) = self.overload_index(ctx) { - if idx > 0 { - write!(&mut name, "{}", idx).unwrap(); - } - } - - name - } - } - } - - fn is_anon(&self) -> bool { - match self.kind() { - ItemKind::Module(module) => module.name().is_none(), - ItemKind::Type(ty) => ty.name().is_none(), - ItemKind::Function(_) => false, - ItemKind::Var(_) => false, - } - } - - /// Get the canonical name without taking into account the replaces - /// annotation. - /// - /// This is the base logic used to implement hiding and replacing via - /// annotations, and also to implement proper name mangling. 
- /// - /// The idea is that each generated type in the same "level" (read: module - /// or namespace) has a unique canonical name. - /// - /// This name should be derived from the immutable state contained in the - /// type and the parent chain, since it should be consistent. - /// - /// If `BindgenOptions::disable_nested_struct_naming` is true then returned - /// name is the inner most non-anonymous name plus all the anonymous base names - /// that follows. - pub fn real_canonical_name( - &self, - ctx: &BindgenContext, - opt: &NameOptions, - ) -> String { - let target = ctx.resolve_item(self.name_target(ctx)); - - // Short-circuit if the target has an override, and just use that. - if let Some(path) = target.annotations.use_instead_of() { - if ctx.options().enable_cxx_namespaces { - return path.last().unwrap().clone(); - } - return path.join("_"); - } - - let base_name = target.base_name(ctx); - - // Named template type arguments are never namespaced, and never - // mangled. - if target.is_template_param(ctx, &()) { - return base_name; - } - - // Ancestors' id iter - let mut ids_iter = target - .parent_id() - .ancestors(ctx) - .filter(|id| *id != ctx.root_module()) - .take_while(|id| { - // Stop iterating ancestors once we reach a non-inline namespace - // when opt.within_namespaces is set. - !opt.within_namespaces || !ctx.resolve_item(*id).is_module() - }) - .filter(|id| { - if !ctx.options().conservative_inline_namespaces { - if let ItemKind::Module(ref module) = - *ctx.resolve_item(*id).kind() - { - return !module.is_inline(); - } - } - - true - }); - - let ids: Vec<_> = if ctx.options().disable_nested_struct_naming { - let mut ids = Vec::new(); - - // If target is anonymous we need find its first named ancestor. - if target.is_anon() { - for id in ids_iter.by_ref() { - ids.push(id); - - if !ctx.resolve_item(id).is_anon() { - break; - } - } - } - - ids - } else { - ids_iter.collect() - }; - - // Concatenate this item's ancestors' names together. 
- let mut names: Vec<_> = ids - .into_iter() - .map(|id| { - let item = ctx.resolve_item(id); - let target = ctx.resolve_item(item.name_target(ctx)); - target.base_name(ctx) - }) - .filter(|name| !name.is_empty()) - .collect(); - - names.reverse(); - - if !base_name.is_empty() { - names.push(base_name); - } - - if ctx.options().c_naming { - if let Some(prefix) = self.c_naming_prefix() { - names.insert(0, prefix.to_string()); - } - } - - let name = names.join("_"); - - let name = if opt.user_mangled == UserMangled::Yes { - ctx.parse_callbacks() - .and_then(|callbacks| callbacks.item_name(&name)) - .unwrap_or(name) - } else { - name - }; - - ctx.rust_mangle(&name).into_owned() - } - - /// The exposed id that represents an unique id among the siblings of a - /// given item. - pub fn exposed_id(&self, ctx: &BindgenContext) -> String { - // Only use local ids for enums, classes, structs and union types. All - // other items use their global id. - let ty_kind = self.kind().as_type().map(|t| t.kind()); - if let Some(ty_kind) = ty_kind { - match *ty_kind { - TypeKind::Comp(..) | - TypeKind::TemplateInstantiation(..) | - TypeKind::Enum(..) => return self.local_id(ctx).to_string(), - _ => {} - } - } - - // Note that this `id_` prefix prevents (really unlikely) collisions - // between the global id and the local id of an item with the same - // parent. - format!("id_{}", self.id().as_usize()) - } - - /// Get a reference to this item's `Module`, or `None` if this is not a - /// `Module` item. - pub fn as_module(&self) -> Option<&Module> { - match self.kind { - ItemKind::Module(ref module) => Some(module), - _ => None, - } - } - - /// Get a mutable reference to this item's `Module`, or `None` if this is - /// not a `Module` item. 
- pub fn as_module_mut(&mut self) -> Option<&mut Module> { - match self.kind { - ItemKind::Module(ref mut module) => Some(module), - _ => None, - } - } - - /// Returns whether the item is a constified module enum - fn is_constified_enum_module(&self, ctx: &BindgenContext) -> bool { - // Do not jump through aliases, except for aliases that point to a type - // with the same name, since we dont generate coe for them. - let item = self.id.into_resolver().through_type_refs().resolve(ctx); - let type_ = match *item.kind() { - ItemKind::Type(ref type_) => type_, - _ => return false, - }; - - match *type_.kind() { - TypeKind::Enum(ref enum_) => { - enum_.computed_enum_variation(ctx, self) == - EnumVariation::ModuleConsts - } - TypeKind::Alias(inner_id) => { - // TODO(emilio): Make this "hop through type aliases that aren't - // really generated" an option in `ItemResolver`? - let inner_item = ctx.resolve_item(inner_id); - let name = item.canonical_name(ctx); - - if inner_item.canonical_name(ctx) == name { - inner_item.is_constified_enum_module(ctx) - } else { - false - } - } - _ => false, - } - } - - /// Is this item of a kind that is enabled for code generation? - pub fn is_enabled_for_codegen(&self, ctx: &BindgenContext) -> bool { - let cc = &ctx.options().codegen_config; - match *self.kind() { - ItemKind::Module(..) => true, - ItemKind::Var(_) => cc.vars(), - ItemKind::Type(_) => cc.types(), - ItemKind::Function(ref f) => match f.kind() { - FunctionKind::Function => cc.functions(), - FunctionKind::Method(MethodKind::Constructor) => { - cc.constructors() - } - FunctionKind::Method(MethodKind::Destructor) | - FunctionKind::Method(MethodKind::VirtualDestructor { - .. - }) => cc.destructors(), - FunctionKind::Method(MethodKind::Static) | - FunctionKind::Method(MethodKind::Normal) | - FunctionKind::Method(MethodKind::Virtual { .. 
}) => { - cc.methods() - } - }, - } - } - - /// Returns the path we should use for allowlisting / blocklisting, which - /// doesn't include user-mangling. - pub fn path_for_allowlisting(&self, ctx: &BindgenContext) -> &Vec { - self.path_for_allowlisting - .borrow_with(|| self.compute_path(ctx, UserMangled::No)) - } - - fn compute_path( - &self, - ctx: &BindgenContext, - mangled: UserMangled, - ) -> Vec { - if let Some(path) = self.annotations().use_instead_of() { - let mut ret = - vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()]; - ret.extend_from_slice(path); - return ret; - } - - let target = ctx.resolve_item(self.name_target(ctx)); - let mut path: Vec<_> = target - .ancestors(ctx) - .chain(iter::once(ctx.root_module().into())) - .map(|id| ctx.resolve_item(id)) - .filter(|item| { - item.id() == target.id() || - item.as_module().map_or(false, |module| { - !module.is_inline() || - ctx.options().conservative_inline_namespaces - }) - }) - .map(|item| { - ctx.resolve_item(item.name_target(ctx)) - .name(ctx) - .within_namespaces() - .user_mangled(mangled) - .get() - }) - .collect(); - path.reverse(); - path - } - - /// Returns a prefix for the canonical name when C naming is enabled. - fn c_naming_prefix(&self) -> Option<&str> { - let ty = match self.kind { - ItemKind::Type(ref ty) => ty, - _ => return None, - }; - - Some(match ty.kind() { - TypeKind::Comp(ref ci) => match ci.kind() { - CompKind::Struct => "struct", - CompKind::Union => "union", - }, - TypeKind::Enum(..) 
=> "enum", - _ => return None, - }) - } -} - -impl IsOpaque for T -where - T: Copy + Into, -{ - type Extra = (); - - fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item((*self).into()).is_opaque(ctx, &()) - } -} - -impl IsOpaque for Item { - type Extra = (); - - fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.annotations.opaque() || - self.as_type().map_or(false, |ty| ty.is_opaque(ctx, self)) || - ctx.opaque_by_name(self.path_for_allowlisting(ctx)) - } -} - -impl HasVtable for T -where - T: Copy + Into, -{ - fn has_vtable(&self, ctx: &BindgenContext) -> bool { - let id: ItemId = (*self).into(); - id.as_type_id(ctx).map_or(false, |id| { - !matches!(ctx.lookup_has_vtable(id), HasVtableResult::No) - }) - } - - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { - let id: ItemId = (*self).into(); - id.as_type_id(ctx).map_or(false, |id| { - matches!(ctx.lookup_has_vtable(id), HasVtableResult::SelfHasVtable) - }) - } -} - -impl HasVtable for Item { - fn has_vtable(&self, ctx: &BindgenContext) -> bool { - self.id().has_vtable(ctx) - } - - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { - self.id().has_vtable_ptr(ctx) - } -} - -impl Sizedness for T -where - T: Copy + Into, -{ - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { - let id: ItemId = (*self).into(); - id.as_type_id(ctx) - .map_or(SizednessResult::default(), |id| ctx.lookup_sizedness(id)) - } -} - -impl Sizedness for Item { - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { - self.id().sizedness(ctx) - } -} - -impl HasTypeParamInArray for T -where - T: Copy + Into, -{ - fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - 
ctx.lookup_has_type_param_in_array(*self) - } -} - -impl HasTypeParamInArray for Item { - fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.lookup_has_type_param_in_array(self.id()) - } -} - -impl HasFloat for T -where - T: Copy + Into, -{ - fn has_float(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.lookup_has_float(*self) - } -} - -impl HasFloat for Item { - fn has_float(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.lookup_has_float(self.id()) - } -} - -/// A set of items. -pub type ItemSet = BTreeSet; - -impl DotAttributes for Item { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{:?} - name{}", - self.id, - self.name(ctx).get() - )?; - - if self.is_opaque(ctx, &()) { - writeln!(out, "opaquetrue")?; - } - - self.kind.dot_attributes(ctx, out) - } -} - -impl TemplateParameters for T -where - T: Copy + Into, -{ - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - ctx.resolve_item_fallible(*self) - .map_or(vec![], |item| item.self_template_params(ctx)) - } -} - -impl TemplateParameters for Item { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - self.kind.self_template_params(ctx) - } -} - -impl TemplateParameters for ItemKind { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - match *self { - ItemKind::Type(ref ty) => ty.self_template_params(ctx), - // If we start emitting bindings to explicitly instantiated - // functions, then we'll need to check ItemKind::Function for - // template params. 
- ItemKind::Function(_) | ItemKind::Module(_) | ItemKind::Var(_) => { - vec![] - } - } - } -} - -// An utility function to handle recursing inside nested types. -fn visit_child( - cur: clang::Cursor, - id: ItemId, - ty: &clang::Type, - parent_id: Option, - ctx: &mut BindgenContext, - result: &mut Result, -) -> clang_sys::CXChildVisitResult { - use clang_sys::*; - if result.is_ok() { - return CXChildVisit_Break; - } - - *result = Item::from_ty_with_id(id, ty, cur, parent_id, ctx); - - match *result { - Ok(..) => CXChildVisit_Break, - Err(ParseError::Recurse) => { - cur.visit(|c| visit_child(c, id, ty, parent_id, ctx, result)); - CXChildVisit_Continue - } - Err(ParseError::Continue) => CXChildVisit_Continue, - } -} - -impl ClangItemParser for Item { - fn builtin_type( - kind: TypeKind, - is_const: bool, - ctx: &mut BindgenContext, - ) -> TypeId { - // Feel free to add more here, I'm just lazy. - match kind { - TypeKind::Void | - TypeKind::Int(..) | - TypeKind::Pointer(..) | - TypeKind::Float(..) => {} - _ => panic!("Unsupported builtin type"), - } - - let ty = Type::new(None, None, kind, is_const); - let id = ctx.next_item_id(); - let module = ctx.root_module().into(); - ctx.add_item( - Item::new(id, None, None, module, ItemKind::Type(ty), None), - None, - None, - ); - id.as_type_id_unchecked() - } - - fn parse( - cursor: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - use crate::ir::var::Var; - use clang_sys::*; - - if !cursor.is_valid() { - return Err(ParseError::Continue); - } - - let comment = cursor.raw_comment(); - let annotations = Annotations::new(&cursor); - - let current_module = ctx.current_module().into(); - let relevant_parent_id = parent_id.unwrap_or(current_module); - - macro_rules! 
try_parse { - ($what:ident) => { - match $what::parse(cursor, ctx) { - Ok(ParseResult::New(item, declaration)) => { - let id = ctx.next_item_id(); - - ctx.add_item( - Item::new( - id, - comment, - annotations, - relevant_parent_id, - ItemKind::$what(item), - Some(cursor.location()), - ), - declaration, - Some(cursor), - ); - return Ok(id); - } - Ok(ParseResult::AlreadyResolved(id)) => { - return Ok(id); - } - Err(ParseError::Recurse) => return Err(ParseError::Recurse), - Err(ParseError::Continue) => {} - } - }; - } - - try_parse!(Module); - - // NOTE: Is extremely important to parse functions and vars **before** - // types. Otherwise we can parse a function declaration as a type - // (which is legal), and lose functions to generate. - // - // In general, I'm not totally confident this split between - // ItemKind::Function and TypeKind::FunctionSig is totally worth it, but - // I guess we can try. - try_parse!(Function); - try_parse!(Var); - - // Types are sort of special, so to avoid parsing template classes - // twice, handle them separately. - { - let definition = cursor.definition(); - let applicable_cursor = definition.unwrap_or(cursor); - - let relevant_parent_id = match definition { - Some(definition) => { - if definition != cursor { - ctx.add_semantic_parent(definition, relevant_parent_id); - return Ok(Item::from_ty_or_ref( - applicable_cursor.cur_type(), - cursor, - parent_id, - ctx, - ) - .into()); - } - ctx.known_semantic_parent(definition) - .or(parent_id) - .unwrap_or_else(|| ctx.current_module().into()) - } - None => relevant_parent_id, - }; - - match Item::from_ty( - &applicable_cursor.cur_type(), - applicable_cursor, - Some(relevant_parent_id), - ctx, - ) { - Ok(ty) => return Ok(ty.into()), - Err(ParseError::Recurse) => return Err(ParseError::Recurse), - Err(ParseError::Continue) => {} - } - } - - // Guess how does clang treat extern "C" blocks? 
- if cursor.kind() == CXCursor_UnexposedDecl { - Err(ParseError::Recurse) - } else { - // We allowlist cursors here known to be unhandled, to prevent being - // too noisy about this. - match cursor.kind() { - CXCursor_MacroDefinition | - CXCursor_MacroExpansion | - CXCursor_UsingDeclaration | - CXCursor_UsingDirective | - CXCursor_StaticAssert | - CXCursor_FunctionTemplate => { - debug!( - "Unhandled cursor kind {:?}: {:?}", - cursor.kind(), - cursor - ); - } - CXCursor_InclusionDirective => { - let file = cursor.get_included_file_name(); - match file { - None => { - warn!( - "Inclusion of a nameless file in {:?}", - cursor - ); - } - Some(filename) => { - ctx.include_file(filename); - } - } - } - _ => { - // ignore toplevel operator overloads - let spelling = cursor.spelling(); - if !spelling.starts_with("operator") { - warn!( - "Unhandled cursor kind {:?}: {:?}", - cursor.kind(), - cursor - ); - } - } - } - - Err(ParseError::Continue) - } - } - - fn from_ty_or_ref( - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> TypeId { - let id = ctx.next_item_id(); - Self::from_ty_or_ref_with_id(id, ty, location, parent_id, ctx) - } - - /// Parse a C++ type. If we find a reference to a type that has not been - /// defined yet, use `UnresolvedTypeRef` as a placeholder. - /// - /// This logic is needed to avoid parsing items with the incorrect parent - /// and it's sort of complex to explain, so I'll just point to - /// `tests/headers/typeref.hpp` to see the kind of constructs that forced - /// this. - /// - /// Typerefs are resolved once parsing is completely done, see - /// `BindgenContext::resolve_typerefs`. 
- fn from_ty_or_ref_with_id( - potential_id: ItemId, - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> TypeId { - debug!( - "from_ty_or_ref_with_id: {:?} {:?}, {:?}, {:?}", - potential_id, ty, location, parent_id - ); - - if ctx.collected_typerefs() { - debug!("refs already collected, resolving directly"); - return Item::from_ty_with_id( - potential_id, - &ty, - location, - parent_id, - ctx, - ) - .unwrap_or_else(|_| Item::new_opaque_type(potential_id, &ty, ctx)); - } - - if let Some(ty) = ctx.builtin_or_resolved_ty( - potential_id, - parent_id, - &ty, - Some(location), - ) { - debug!("{:?} already resolved: {:?}", ty, location); - return ty; - } - - debug!("New unresolved type reference: {:?}, {:?}", ty, location); - - let is_const = ty.is_const(); - let kind = TypeKind::UnresolvedTypeRef(ty, location, parent_id); - let current_module = ctx.current_module(); - - ctx.add_item( - Item::new( - potential_id, - None, - None, - parent_id.unwrap_or_else(|| current_module.into()), - ItemKind::Type(Type::new(None, None, kind, is_const)), - Some(location.location()), - ), - None, - None, - ); - potential_id.as_type_id_unchecked() - } - - fn from_ty( - ty: &clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - let id = ctx.next_item_id(); - Item::from_ty_with_id(id, ty, location, parent_id, ctx) - } - - /// This is one of the trickiest methods you'll find (probably along with - /// some of the ones that handle templates in `BindgenContext`). - /// - /// This method parses a type, given the potential id of that type (if - /// parsing it was correct), an optional location we're scanning, which is - /// critical some times to obtain information, an optional parent item id, - /// that will, if it's `None`, become the current module id, and the - /// context. 
- fn from_ty_with_id( - id: ItemId, - ty: &clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - - debug!( - "Item::from_ty_with_id: {:?}\n\ - \tty = {:?},\n\ - \tlocation = {:?}", - id, ty, location - ); - - if ty.kind() == clang_sys::CXType_Unexposed || - location.cur_type().kind() == clang_sys::CXType_Unexposed - { - if ty.is_associated_type() || - location.cur_type().is_associated_type() - { - return Ok(Item::new_opaque_type(id, ty, ctx)); - } - - if let Some(param_id) = Item::type_param(None, location, ctx) { - return Ok(ctx.build_ty_wrapper(id, param_id, None, ty)); - } - } - - // Treat all types that are declared inside functions as opaque. The Rust binding - // won't be able to do anything with them anyway. - // - // (If we don't do this check here, we can have subtle logic bugs because we generally - // ignore function bodies. See issue #2036.) - if let Some(ref parent) = ty.declaration().fallible_semantic_parent() { - if FunctionKind::from_cursor(parent).is_some() { - debug!("Skipping type declared inside function: {:?}", ty); - return Ok(Item::new_opaque_type(id, ty, ctx)); - } - } - - let decl = { - let canonical_def = ty.canonical_type().declaration().definition(); - canonical_def.unwrap_or_else(|| ty.declaration()) - }; - - let comment = decl.raw_comment().or_else(|| location.raw_comment()); - let annotations = - Annotations::new(&decl).or_else(|| Annotations::new(&location)); - - if let Some(ref annotations) = annotations { - if let Some(replaced) = annotations.use_instead_of() { - ctx.replace(replaced, id); - } - } - - if let Some(ty) = - ctx.builtin_or_resolved_ty(id, parent_id, ty, Some(location)) - { - return Ok(ty); - } - - // First, check we're not recursing. 
- let mut valid_decl = decl.kind() != CXCursor_NoDeclFound; - let declaration_to_look_for = if valid_decl { - decl.canonical() - } else if location.kind() == CXCursor_ClassTemplate { - valid_decl = true; - location - } else { - decl - }; - - if valid_decl { - if let Some(partial) = ctx - .currently_parsed_types() - .iter() - .find(|ty| *ty.decl() == declaration_to_look_for) - { - debug!("Avoiding recursion parsing type: {:?}", ty); - // Unchecked because we haven't finished this type yet. - return Ok(partial.id().as_type_id_unchecked()); - } - } - - let current_module = ctx.current_module().into(); - let partial_ty = PartialType::new(declaration_to_look_for, id); - if valid_decl { - ctx.begin_parsing(partial_ty); - } - - let result = Type::from_clang_ty(id, ty, location, parent_id, ctx); - let relevant_parent_id = parent_id.unwrap_or(current_module); - let ret = match result { - Ok(ParseResult::AlreadyResolved(ty)) => { - Ok(ty.as_type_id_unchecked()) - } - Ok(ParseResult::New(item, declaration)) => { - ctx.add_item( - Item::new( - id, - comment, - annotations, - relevant_parent_id, - ItemKind::Type(item), - Some(location.location()), - ), - declaration, - Some(location), - ); - Ok(id.as_type_id_unchecked()) - } - Err(ParseError::Continue) => Err(ParseError::Continue), - Err(ParseError::Recurse) => { - debug!("Item::from_ty recursing in the ast"); - let mut result = Err(ParseError::Recurse); - - // Need to pop here, otherwise we'll get stuck. - // - // TODO: Find a nicer interface, really. Also, the - // declaration_to_look_for suspiciously shares a lot of - // logic with ir::context, so we should refactor that. 
- if valid_decl { - let finished = ctx.finish_parsing(); - assert_eq!(*finished.decl(), declaration_to_look_for); - } - - location.visit(|cur| { - visit_child(cur, id, ty, parent_id, ctx, &mut result) - }); - - if valid_decl { - let partial_ty = - PartialType::new(declaration_to_look_for, id); - ctx.begin_parsing(partial_ty); - } - - // If we have recursed into the AST all we know, and we still - // haven't found what we've got, let's just try and make a named - // type. - // - // This is what happens with some template members, for example. - if let Err(ParseError::Recurse) = result { - warn!( - "Unknown type, assuming named template type: \ - id = {:?}; spelling = {}", - id, - ty.spelling() - ); - Item::type_param(Some(id), location, ctx) - .map(Ok) - .unwrap_or(Err(ParseError::Recurse)) - } else { - result - } - } - }; - - if valid_decl { - let partial_ty = ctx.finish_parsing(); - assert_eq!(*partial_ty.decl(), declaration_to_look_for); - } - - ret - } - - /// A named type is a template parameter, e.g., the "T" in Foo. They're - /// always local so it's the only exception when there's no declaration for - /// a type. - fn type_param( - with_id: Option, - location: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Option { - let ty = location.cur_type(); - - debug!( - "Item::type_param:\n\ - \twith_id = {:?},\n\ - \tty = {} {:?},\n\ - \tlocation: {:?}", - with_id, - ty.spelling(), - ty, - location - ); - - if ty.kind() != clang_sys::CXType_Unexposed { - // If the given cursor's type's kind is not Unexposed, then we - // aren't looking at a template parameter. This check may need to be - // updated in the future if they start properly exposing template - // type parameters. - return None; - } - - let ty_spelling = ty.spelling(); - - // Clang does not expose any information about template type parameters - // via their clang::Type, nor does it give us their canonical cursors - // the straightforward way. 
However, there are three situations from - // which we can find the definition of the template type parameter, if - // the cursor is indeed looking at some kind of a template type - // parameter or use of one: - // - // 1. The cursor is pointing at the template type parameter's - // definition. This is the trivial case. - // - // (kind = TemplateTypeParameter, ...) - // - // 2. The cursor is pointing at a TypeRef whose referenced() cursor is - // situation (1). - // - // (kind = TypeRef, - // referenced = (kind = TemplateTypeParameter, ...), - // ...) - // - // 3. The cursor is pointing at some use of a template type parameter - // (for example, in a FieldDecl), and this cursor has a child cursor - // whose spelling is the same as the parent's type's spelling, and whose - // kind is a TypeRef of the situation (2) variety. - // - // (kind = FieldDecl, - // type = (kind = Unexposed, - // spelling = "T", - // ...), - // children = - // (kind = TypeRef, - // spelling = "T", - // referenced = (kind = TemplateTypeParameter, - // spelling = "T", - // ...), - // ...) - // ...) - // - // TODO: The alternative to this hacky pattern matching would be to - // maintain proper scopes of template parameters while parsing and use - // de Brujin indices to access template parameters, which clang exposes - // in the cursor's type's canonical type's spelling: - // "type-parameter-x-y". That is probably a better approach long-term, - // but maintaining these scopes properly would require more changes to - // the whole libclang -> IR parsing code. - - fn is_template_with_spelling( - refd: &clang::Cursor, - spelling: &str, - ) -> bool { - lazy_static! { - static ref ANON_TYPE_PARAM_RE: regex::Regex = - regex::Regex::new(r"^type\-parameter\-\d+\-\d+$").unwrap(); - } - - if refd.kind() != clang_sys::CXCursor_TemplateTypeParameter { - return false; - } - - let refd_spelling = refd.spelling(); - refd_spelling == spelling || - // Allow for anonymous template parameters. 
- (refd_spelling.is_empty() && ANON_TYPE_PARAM_RE.is_match(spelling.as_ref())) - } - - let definition = if is_template_with_spelling(&location, &ty_spelling) { - // Situation (1) - location - } else if location.kind() == clang_sys::CXCursor_TypeRef { - // Situation (2) - match location.referenced() { - Some(refd) - if is_template_with_spelling(&refd, &ty_spelling) => - { - refd - } - _ => return None, - } - } else { - // Situation (3) - let mut definition = None; - - location.visit(|child| { - let child_ty = child.cur_type(); - if child_ty.kind() == clang_sys::CXCursor_TypeRef && - child_ty.spelling() == ty_spelling - { - match child.referenced() { - Some(refd) - if is_template_with_spelling( - &refd, - &ty_spelling, - ) => - { - definition = Some(refd); - return clang_sys::CXChildVisit_Break; - } - _ => {} - } - } - - clang_sys::CXChildVisit_Continue - }); - - definition? - }; - assert!(is_template_with_spelling(&definition, &ty_spelling)); - - // Named types are always parented to the root module. They are never - // referenced with namespace prefixes, and they can't inherit anything - // from their parent either, so it is simplest to just hang them off - // something we know will always exist. - let parent = ctx.root_module().into(); - - if let Some(id) = ctx.get_type_param(&definition) { - if let Some(with_id) = with_id { - return Some(ctx.build_ty_wrapper( - with_id, - id, - Some(parent), - &ty, - )); - } else { - return Some(id); - } - } - - // See tests/headers/const_tparam.hpp and - // tests/headers/variadic_tname.hpp. 
- let name = ty_spelling.replace("const ", "").replace(".", ""); - - let id = with_id.unwrap_or_else(|| ctx.next_item_id()); - let item = Item::new( - id, - None, - None, - parent, - ItemKind::Type(Type::named(name)), - Some(location.location()), - ); - ctx.add_type_param(item, definition); - Some(id.as_type_id_unchecked()) - } -} - -impl ItemCanonicalName for Item { - fn canonical_name(&self, ctx: &BindgenContext) -> String { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.canonical_name - .borrow_with(|| { - let in_namespace = ctx.options().enable_cxx_namespaces || - ctx.options().disable_name_namespacing; - - if in_namespace { - self.name(ctx).within_namespaces().get() - } else { - self.name(ctx).get() - } - }) - .clone() - } -} - -impl ItemCanonicalPath for Item { - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec { - let mut path = self.canonical_path(ctx); - - // ASSUMPTION: (disable_name_namespacing && cxx_namespaces) - // is equivalent to - // disable_name_namespacing - if ctx.options().disable_name_namespacing { - // Only keep the last item in path - let split_idx = path.len() - 1; - path = path.split_off(split_idx); - } else if !ctx.options().enable_cxx_namespaces { - // Ignore first item "root" - path = vec![path[1..].join("_")]; - } - - if self.is_constified_enum_module(ctx) { - path.push(CONSTIFIED_ENUM_MODULE_REPR_NAME.into()); - } - - path - } - - fn canonical_path(&self, ctx: &BindgenContext) -> Vec { - self.compute_path(ctx, UserMangled::Yes) - } -} - -/// Whether to use the user-mangled name (mangled by the `item_name` callback or -/// not. -/// -/// Most of the callers probably want just yes, but the ones dealing with -/// allowlisting and blocklisting don't. -#[derive(Copy, Clone, Debug, PartialEq)] -enum UserMangled { - No, - Yes, -} - -/// Builder struct for naming variations, which hold inside different -/// flags for naming options. 
-#[derive(Debug)] -pub struct NameOptions<'a> { - item: &'a Item, - ctx: &'a BindgenContext, - within_namespaces: bool, - user_mangled: UserMangled, -} - -impl<'a> NameOptions<'a> { - /// Construct a new `NameOptions` - pub fn new(item: &'a Item, ctx: &'a BindgenContext) -> Self { - NameOptions { - item, - ctx, - within_namespaces: false, - user_mangled: UserMangled::Yes, - } - } - - /// Construct the name without the item's containing C++ namespaces mangled - /// into it. In other words, the item's name within the item's namespace. - pub fn within_namespaces(&mut self) -> &mut Self { - self.within_namespaces = true; - self - } - - fn user_mangled(&mut self, user_mangled: UserMangled) -> &mut Self { - self.user_mangled = user_mangled; - self - } - - /// Construct a name `String` - pub fn get(&self) -> String { - self.item.real_canonical_name(self.ctx, self) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/layout.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/layout.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/layout.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/layout.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -//! Intermediate representation for the physical layout of some type. - -use super::derive::CanDerive; -use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; -use crate::clang; -use crate::ir::context::BindgenContext; -use std::cmp; - -/// A type that represents the struct layout of a type. -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct Layout { - /// The size (in bytes) of this layout. - pub size: usize, - /// The alignment (in bytes) of this layout. - pub align: usize, - /// Whether this layout's members are packed or not. 
- pub packed: bool, -} - -#[test] -fn test_layout_for_size() { - use std::mem; - - let ptr_size = mem::size_of::<*mut ()>(); - assert_eq!( - Layout::for_size_internal(ptr_size, ptr_size), - Layout::new(ptr_size, ptr_size) - ); - assert_eq!( - Layout::for_size_internal(ptr_size, 3 * ptr_size), - Layout::new(3 * ptr_size, ptr_size) - ); -} - -impl Layout { - /// Gets the integer type name for a given known size. - pub fn known_type_for_size( - ctx: &BindgenContext, - size: usize, - ) -> Option<&'static str> { - Some(match size { - 16 if ctx.options().rust_features.i128_and_u128 => "u128", - 8 => "u64", - 4 => "u32", - 2 => "u16", - 1 => "u8", - _ => return None, - }) - } - - /// Construct a new `Layout` with the given `size` and `align`. It is not - /// packed. - pub fn new(size: usize, align: usize) -> Self { - Layout { - size, - align, - packed: false, - } - } - - fn for_size_internal(ptr_size: usize, size: usize) -> Self { - let mut next_align = 2; - while size % next_align == 0 && next_align <= ptr_size { - next_align *= 2; - } - Layout { - size, - align: next_align / 2, - packed: false, - } - } - - /// Creates a non-packed layout for a given size, trying to use the maximum - /// alignment possible. - pub fn for_size(ctx: &BindgenContext, size: usize) -> Self { - Self::for_size_internal(ctx.target_pointer_size(), size) - } - - /// Is this a zero-sized layout? - pub fn is_zero(&self) -> bool { - self.size == 0 && self.align == 0 - } - - /// Construct a zero-sized layout. - pub fn zero() -> Self { - Self::new(0, 0) - } - - /// Get this layout as an opaque type. - pub fn opaque(&self) -> Opaque { - Opaque(*self) - } -} - -/// When we are treating a type as opaque, it is just a blob with a `Layout`. -#[derive(Clone, Debug, PartialEq)] -pub struct Opaque(pub Layout); - -impl Opaque { - /// Construct a new opaque type from the given clang type. 
- pub fn from_clang_ty(ty: &clang::Type, ctx: &BindgenContext) -> Type { - let layout = Layout::new(ty.size(ctx), ty.align(ctx)); - let ty_kind = TypeKind::Opaque; - let is_const = ty.is_const(); - Type::new(None, Some(layout), ty_kind, is_const) - } - - /// Return the known rust type we should use to create a correctly-aligned - /// field with this layout. - pub fn known_rust_type_for_array( - &self, - ctx: &BindgenContext, - ) -> Option<&'static str> { - Layout::known_type_for_size(ctx, self.0.align) - } - - /// Return the array size that an opaque type for this layout should have if - /// we know the correct type for it, or `None` otherwise. - pub fn array_size(&self, ctx: &BindgenContext) -> Option { - if self.known_rust_type_for_array(ctx).is_some() { - Some(self.0.size / cmp::max(self.0.align, 1)) - } else { - None - } - } - - /// Return `true` if this opaque layout's array size will fit within the - /// maximum number of array elements that Rust allows deriving traits - /// with. Return `false` otherwise. - pub fn array_size_within_derive_limit( - &self, - ctx: &BindgenContext, - ) -> CanDerive { - if self - .array_size(ctx) - .map_or(false, |size| size <= RUST_DERIVE_IN_ARRAY_LIMIT) - { - CanDerive::Yes - } else { - CanDerive::Manually - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/mod.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -//! The ir module defines bindgen's intermediate representation. -//! -//! Parsing C/C++ generates the IR, while code generation outputs Rust code from -//! the IR. 
- -pub mod analysis; -pub mod annotations; -pub mod comment; -pub mod comp; -pub mod context; -pub mod derive; -pub mod dot; -pub mod enum_ty; -pub mod function; -pub mod int; -pub mod item; -pub mod item_kind; -pub mod layout; -pub mod module; -pub mod objc; -pub mod template; -pub mod traversal; -pub mod ty; -pub mod var; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/module.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/module.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/module.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/module.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -//! Intermediate representation for modules (AKA C++ namespaces). - -use super::context::BindgenContext; -use super::dot::DotAttributes; -use super::item::ItemSet; -use crate::clang; -use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; -use crate::parse_one; -use std::io; - -/// Whether this module is inline or not. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ModuleKind { - /// This module is not inline. - Normal, - /// This module is inline, as in `inline namespace foo {}`. - Inline, -} - -/// A module, as in, a C++ namespace. -#[derive(Clone, Debug)] -pub struct Module { - /// The name of the module, or none if it's anonymous. - name: Option, - /// The kind of module this is. - kind: ModuleKind, - /// The children of this module, just here for convenience. - children: ItemSet, -} - -impl Module { - /// Construct a new `Module`. - pub fn new(name: Option, kind: ModuleKind) -> Self { - Module { - name, - kind, - children: ItemSet::new(), - } - } - - /// Get this module's name. - pub fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - /// Get a mutable reference to this module's children. - pub fn children_mut(&mut self) -> &mut ItemSet { - &mut self.children - } - - /// Get this module's children. 
- pub fn children(&self) -> &ItemSet { - &self.children - } - - /// Whether this namespace is inline. - pub fn is_inline(&self) -> bool { - self.kind == ModuleKind::Inline - } -} - -impl DotAttributes for Module { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "ModuleKind{:?}", self.kind) - } -} - -impl ClangSubItemParser for Module { - fn parse( - cursor: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - match cursor.kind() { - CXCursor_Namespace => { - let module_id = ctx.module(cursor); - ctx.with_module(module_id, |ctx| { - cursor.visit(|cursor| { - parse_one(ctx, cursor, Some(module_id.into())) - }) - }); - - Ok(ParseResult::AlreadyResolved(module_id.into())) - } - _ => Err(ParseError::Continue), - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/objc.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/objc.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/objc.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/objc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -//! 
Objective C types - -use super::context::{BindgenContext, ItemId}; -use super::function::FunctionSig; -use super::item::Item; -use super::traversal::{Trace, Tracer}; -use super::ty::TypeKind; -use crate::clang; -use crate::parse::ClangItemParser; -use clang_sys::CXChildVisit_Continue; -use clang_sys::CXCursor_ObjCCategoryDecl; -use clang_sys::CXCursor_ObjCClassMethodDecl; -use clang_sys::CXCursor_ObjCClassRef; -use clang_sys::CXCursor_ObjCInstanceMethodDecl; -use clang_sys::CXCursor_ObjCProtocolDecl; -use clang_sys::CXCursor_ObjCProtocolRef; -use clang_sys::CXCursor_ObjCSuperClassRef; -use clang_sys::CXCursor_TemplateTypeParameter; -use proc_macro2::{Ident, Span, TokenStream}; - -/// Objective C interface as used in TypeKind -/// -/// Also protocols and categories are parsed as this type -#[derive(Debug)] -pub struct ObjCInterface { - /// The name - /// like, NSObject - name: String, - - category: Option, - - is_protocol: bool, - - /// The list of template names almost always, ObjectType or KeyType - pub template_names: Vec, - - /// The list of protocols that this interface conforms to. - pub conforms_to: Vec, - - /// The direct parent for this interface. - pub parent_class: Option, - - /// List of the methods defined in this interfae - methods: Vec, - - class_methods: Vec, -} - -/// The objective c methods -#[derive(Debug)] -pub struct ObjCMethod { - /// The original method selector name - /// like, dataWithBytes:length: - name: String, - - /// Method name as converted to rust - /// like, dataWithBytes_length_ - rust_name: String, - - signature: FunctionSig, - - /// Is class method? 
- is_class_method: bool, -} - -impl ObjCInterface { - fn new(name: &str) -> ObjCInterface { - ObjCInterface { - name: name.to_owned(), - category: None, - is_protocol: false, - template_names: Vec::new(), - parent_class: None, - conforms_to: Vec::new(), - methods: Vec::new(), - class_methods: Vec::new(), - } - } - - /// The name - /// like, NSObject - pub fn name(&self) -> &str { - self.name.as_ref() - } - - /// Formats the name for rust - /// Can be like NSObject, but with categories might be like NSObject_NSCoderMethods - /// and protocols are like PNSObject - pub fn rust_name(&self) -> String { - if let Some(ref cat) = self.category { - format!("{}_{}", self.name(), cat) - } else if self.is_protocol { - format!("P{}", self.name()) - } else { - format!("I{}", self.name().to_owned()) - } - } - - /// Is this a template interface? - pub fn is_template(&self) -> bool { - !self.template_names.is_empty() - } - - /// List of the methods defined in this interface - pub fn methods(&self) -> &Vec { - &self.methods - } - - /// Is this a protocol? - pub fn is_protocol(&self) -> bool { - self.is_protocol - } - - /// Is this a category? 
- pub fn is_category(&self) -> bool { - self.category.is_some() - } - - /// List of the class methods defined in this interface - pub fn class_methods(&self) -> &Vec { - &self.class_methods - } - - /// Parses the Objective C interface from the cursor - pub fn from_ty( - cursor: &clang::Cursor, - ctx: &mut BindgenContext, - ) -> Option { - let name = cursor.spelling(); - let mut interface = Self::new(&name); - - if cursor.kind() == CXCursor_ObjCProtocolDecl { - interface.is_protocol = true; - } - - cursor.visit(|c| { - match c.kind() { - CXCursor_ObjCClassRef => { - if cursor.kind() == CXCursor_ObjCCategoryDecl { - // We are actually a category extension, and we found the reference - // to the original interface, so name this interface approriately - interface.name = c.spelling(); - interface.category = Some(cursor.spelling()); - } - } - CXCursor_ObjCProtocolRef => { - // Gather protocols this interface conforms to - let needle = format!("P{}", c.spelling()); - let items_map = ctx.items(); - debug!( - "Interface {} conforms to {}, find the item", - interface.name, needle - ); - - for (id, item) in items_map { - if let Some(ty) = item.as_type() { - if let TypeKind::ObjCInterface(ref protocol) = - *ty.kind() - { - if protocol.is_protocol { - debug!( - "Checking protocol {}, ty.name {:?}", - protocol.name, - ty.name() - ); - if Some(needle.as_ref()) == ty.name() { - debug!( - "Found conforming protocol {:?}", - item - ); - interface.conforms_to.push(id); - break; - } - } - } - } - } - } - CXCursor_ObjCInstanceMethodDecl | - CXCursor_ObjCClassMethodDecl => { - let name = c.spelling(); - let signature = - FunctionSig::from_ty(&c.cur_type(), &c, ctx) - .expect("Invalid function sig"); - let is_class_method = - c.kind() == CXCursor_ObjCClassMethodDecl; - let method = - ObjCMethod::new(&name, signature, is_class_method); - interface.add_method(method); - } - CXCursor_TemplateTypeParameter => { - let name = c.spelling(); - interface.template_names.push(name); - } - 
CXCursor_ObjCSuperClassRef => { - let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx); - interface.parent_class = Some(item.into()); - } - _ => {} - } - CXChildVisit_Continue - }); - Some(interface) - } - - fn add_method(&mut self, method: ObjCMethod) { - if method.is_class_method { - self.class_methods.push(method); - } else { - self.methods.push(method); - } - } -} - -impl ObjCMethod { - fn new( - name: &str, - signature: FunctionSig, - is_class_method: bool, - ) -> ObjCMethod { - let split_name: Vec<&str> = name.split(':').collect(); - - let rust_name = split_name.join("_"); - - ObjCMethod { - name: name.to_owned(), - rust_name, - signature, - is_class_method, - } - } - - /// The original method selector name - /// like, dataWithBytes:length: - pub fn name(&self) -> &str { - self.name.as_ref() - } - - /// Method name as converted to rust - /// like, dataWithBytes_length_ - pub fn rust_name(&self) -> &str { - self.rust_name.as_ref() - } - - /// Returns the methods signature as FunctionSig - pub fn signature(&self) -> &FunctionSig { - &self.signature - } - - /// Is this a class method? - pub fn is_class_method(&self) -> bool { - self.is_class_method - } - - /// Formats the method call - pub fn format_method_call(&self, args: &[TokenStream]) -> TokenStream { - let split_name: Vec> = self - .name - .split(':') - .map(|name| { - if name.is_empty() { - None - } else { - Some(Ident::new(name, Span::call_site())) - } - }) - .collect(); - - // No arguments - if args.is_empty() && split_name.len() == 1 { - let name = &split_name[0]; - return quote! 
{ - #name - }; - } - - // Check right amount of arguments - assert!( - args.len() == split_name.len() - 1, - "Incorrect method name or arguments for objc method, {:?} vs {:?}", - args, - split_name - ); - - // Get arguments without type signatures to pass to `msg_send!` - let mut args_without_types = vec![]; - for arg in args.iter() { - let arg = arg.to_string(); - let name_and_sig: Vec<&str> = arg.split(' ').collect(); - let name = name_and_sig[0]; - args_without_types.push(Ident::new(name, Span::call_site())) - } - - let args = split_name.into_iter().zip(args_without_types).map( - |(arg, arg_val)| { - if let Some(arg) = arg { - quote! { #arg: #arg_val } - } else { - quote! { #arg_val: #arg_val } - } - }, - ); - - quote! { - #( #args )* - } - } -} - -impl Trace for ObjCInterface { - type Extra = (); - - fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - for method in &self.methods { - method.signature.trace(context, tracer, &()); - } - - for class_method in &self.class_methods { - class_method.signature.trace(context, tracer, &()); - } - - for protocol in &self.conforms_to { - tracer.visit(*protocol); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/template.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/template.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/template.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/template.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,343 +0,0 @@ -//! Template declaration and instantiation related things. -//! -//! The nomenclature surrounding templates is often confusing, so here are a few -//! brief definitions: -//! -//! * "Template definition": a class/struct/alias/function definition that takes -//! generic template parameters. For example: -//! -//! ```c++ -//! template -//! class List { -//! // ... -//! }; -//! ``` -//! -//! 
* "Template instantiation": an instantiation is a use of a template with -//! concrete template arguments. For example, `List`. -//! -//! * "Template specialization": an alternative template definition providing a -//! custom definition for instantiations with the matching template -//! arguments. This C++ feature is unsupported by bindgen. For example: -//! -//! ```c++ -//! template<> -//! class List { -//! // Special layout for int lists... -//! }; -//! ``` - -use super::context::{BindgenContext, ItemId, TypeId}; -use super::item::{IsOpaque, Item, ItemAncestors}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use crate::clang; -use crate::parse::ClangItemParser; - -/// Template declaration (and such declaration's template parameters) related -/// methods. -/// -/// This trait's methods distinguish between `None` and `Some([])` for -/// declarations that are not templates and template declarations with zero -/// parameters, in general. -/// -/// Consider this example: -/// -/// ```c++ -/// template -/// class Foo { -/// T use_of_t; -/// U use_of_u; -/// -/// template -/// using Bar = V*; -/// -/// class Inner { -/// T x; -/// U y; -/// Bar z; -/// }; -/// -/// template -/// class Lol { -/// // No use of W, but here's a use of T. -/// T t; -/// }; -/// -/// template -/// class Wtf { -/// // X is not used because W is not used. -/// Lol lololol; -/// }; -/// }; -/// -/// class Qux { -/// int y; -/// }; -/// ``` -/// -/// The following table depicts the results of each trait method when invoked on -/// each of the declarations above: -/// -/// +------+----------------------+--------------------------+------------------------+---- -/// |Decl. | self_template_params | num_self_template_params | all_template_parameters| ... -/// +------+----------------------+--------------------------+------------------------+---- -/// |Foo | [T, U] | 2 | [T, U] | ... -/// |Bar | [V] | 1 | [T, U, V] | ... -/// |Inner | [] | 0 | [T, U] | ... -/// |Lol | [W] | 1 | [T, U, W] | ... 
-/// |Wtf | [X] | 1 | [T, U, X] | ... -/// |Qux | [] | 0 | [] | ... -/// +------+----------------------+--------------------------+------------------------+---- -/// -/// ----+------+-----+----------------------+ -/// ... |Decl. | ... | used_template_params | -/// ----+------+-----+----------------------+ -/// ... |Foo | ... | [T, U] | -/// ... |Bar | ... | [V] | -/// ... |Inner | ... | [] | -/// ... |Lol | ... | [T] | -/// ... |Wtf | ... | [T] | -/// ... |Qux | ... | [] | -/// ----+------+-----+----------------------+ -pub trait TemplateParameters: Sized { - /// Get the set of `ItemId`s that make up this template declaration's free - /// template parameters. - /// - /// Note that these might *not* all be named types: C++ allows - /// constant-value template parameters as well as template-template - /// parameters. Of course, Rust does not allow generic parameters to be - /// anything but types, so we must treat them as opaque, and avoid - /// instantiating them. - fn self_template_params(&self, ctx: &BindgenContext) -> Vec; - - /// Get the number of free template parameters this template declaration - /// has. - fn num_self_template_params(&self, ctx: &BindgenContext) -> usize { - self.self_template_params(ctx).len() - } - - /// Get the complete set of template parameters that can affect this - /// declaration. - /// - /// Note that this item doesn't need to be a template declaration itself for - /// `Some` to be returned here (in contrast to `self_template_params`). If - /// this item is a member of a template declaration, then the parent's - /// template parameters are included here. - /// - /// In the example above, `Inner` depends on both of the `T` and `U` type - /// parameters, even though it is not itself a template declaration and - /// therefore has no type parameters itself. Perhaps it helps to think about - /// how we would fully reference such a member type in C++: - /// `Foo::Inner`. 
`Foo` *must* be instantiated with template - /// arguments before we can gain access to the `Inner` member type. - fn all_template_params(&self, ctx: &BindgenContext) -> Vec - where - Self: ItemAncestors, - { - let mut ancestors: Vec<_> = self.ancestors(ctx).collect(); - ancestors.reverse(); - ancestors - .into_iter() - .flat_map(|id| id.self_template_params(ctx).into_iter()) - .collect() - } - - /// Get only the set of template parameters that this item uses. This is a - /// subset of `all_template_params` and does not necessarily contain any of - /// `self_template_params`. - fn used_template_params(&self, ctx: &BindgenContext) -> Vec - where - Self: AsRef, - { - assert!( - ctx.in_codegen_phase(), - "template parameter usage is not computed until codegen" - ); - - let id = *self.as_ref(); - ctx.resolve_item(id) - .all_template_params(ctx) - .into_iter() - .filter(|p| ctx.uses_template_parameter(id, *p)) - .collect() - } -} - -/// A trait for things which may or may not be a named template type parameter. -pub trait AsTemplateParam { - /// Any extra information the implementor might need to make this decision. - type Extra; - - /// Convert this thing to the item id of a named template type parameter. - fn as_template_param( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> Option; - - /// Is this a named template type parameter? - fn is_template_param( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> bool { - self.as_template_param(ctx, extra).is_some() - } -} - -/// A concrete instantiation of a generic template. -#[derive(Clone, Debug)] -pub struct TemplateInstantiation { - /// The template definition which this is instantiating. - definition: TypeId, - /// The concrete template arguments, which will be substituted in the - /// definition for the generic template parameters. - args: Vec, -} - -impl TemplateInstantiation { - /// Construct a new template instantiation from the given parts. 
- pub fn new(definition: TypeId, args: I) -> TemplateInstantiation - where - I: IntoIterator, - { - TemplateInstantiation { - definition, - args: args.into_iter().collect(), - } - } - - /// Get the template definition for this instantiation. - pub fn template_definition(&self) -> TypeId { - self.definition - } - - /// Get the concrete template arguments used in this instantiation. - pub fn template_arguments(&self) -> &[TypeId] { - &self.args[..] - } - - /// Parse a `TemplateInstantiation` from a clang `Type`. - pub fn from_ty( - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> Option { - use clang_sys::*; - - let template_args = ty.template_args().map_or(vec![], |args| match ty - .canonical_type() - .template_args() - { - Some(canonical_args) => { - let arg_count = args.len(); - args.chain(canonical_args.skip(arg_count)) - .filter(|t| t.kind() != CXType_Invalid) - .map(|t| { - Item::from_ty_or_ref(t, t.declaration(), None, ctx) - }) - .collect() - } - None => args - .filter(|t| t.kind() != CXType_Invalid) - .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx)) - .collect(), - }); - - let declaration = ty.declaration(); - let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl - { - Some(declaration) - } else { - declaration.specialized().or_else(|| { - let mut template_ref = None; - ty.declaration().visit(|child| { - if child.kind() == CXCursor_TemplateRef { - template_ref = Some(child); - return CXVisit_Break; - } - - // Instantiations of template aliases might have the - // TemplateRef to the template alias definition arbitrarily - // deep, so we need to recurse here and not only visit - // direct children. 
- CXChildVisit_Recurse - }); - - template_ref.and_then(|cur| cur.referenced()) - }) - }; - - let definition = match definition { - Some(def) => def, - None => { - if !ty.declaration().is_builtin() { - warn!( - "Could not find template definition for template \ - instantiation" - ); - } - return None; - } - }; - - let template_definition = - Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx); - - Some(TemplateInstantiation::new( - template_definition, - template_args, - )) - } -} - -impl IsOpaque for TemplateInstantiation { - type Extra = Item; - - /// Is this an opaque template instantiation? - fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { - if self.template_definition().is_opaque(ctx, &()) { - return true; - } - - // TODO(#774): This doesn't properly handle opaque instantiations where - // an argument is itself an instantiation because `canonical_name` does - // not insert the template arguments into the name, ie it for nested - // template arguments it creates "Foo" instead of "Foo". The fully - // correct fix is to make `canonical_{name,path}` include template - // arguments properly. 
- - let mut path = item.path_for_allowlisting(ctx).clone(); - let args: Vec<_> = self - .template_arguments() - .iter() - .map(|arg| { - let arg_path = - ctx.resolve_item(*arg).path_for_allowlisting(ctx); - arg_path[1..].join("::") - }) - .collect(); - { - let last = path.last_mut().unwrap(); - last.push('<'); - last.push_str(&args.join(", ")); - last.push('>'); - } - - ctx.opaque_by_name(&path) - } -} - -impl Trace for TemplateInstantiation { - type Extra = (); - - fn trace(&self, _ctx: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - tracer - .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration); - for arg in self.template_arguments() { - tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/traversal.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/traversal.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/traversal.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/traversal.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,508 +0,0 @@ -//! Traversal of the graph of IR items and types. - -use super::context::{BindgenContext, ItemId}; -use super::item::ItemSet; -use std::collections::{BTreeMap, VecDeque}; - -/// An outgoing edge in the IR graph is a reference from some item to another -/// item: -/// -/// from --> to -/// -/// The `from` is left implicit: it is the concrete `Trace` implementer which -/// yielded this outgoing edge. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Edge { - to: ItemId, - kind: EdgeKind, -} - -impl Edge { - /// Construct a new edge whose referent is `to` and is of the given `kind`. - pub fn new(to: ItemId, kind: EdgeKind) -> Edge { - Edge { to, kind } - } -} - -impl From for ItemId { - fn from(val: Edge) -> Self { - val.to - } -} - -/// The kind of edge reference. 
This is useful when we wish to only consider -/// certain kinds of edges for a particular traversal or analysis. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum EdgeKind { - /// A generic, catch-all edge. - Generic, - - /// An edge from a template declaration, to the definition of a named type - /// parameter. For example, the edge from `Foo` to `T` in the following - /// snippet: - /// - /// ```C++ - /// template - /// class Foo { }; - /// ``` - TemplateParameterDefinition, - - /// An edge from a template instantiation to the template declaration that - /// is being instantiated. For example, the edge from `Foo` to - /// to `Foo`: - /// - /// ```C++ - /// template - /// class Foo { }; - /// - /// using Bar = Foo; - /// ``` - TemplateDeclaration, - - /// An edge from a template instantiation to its template argument. For - /// example, `Foo` to `Bar`: - /// - /// ```C++ - /// template - /// class Foo { }; - /// - /// class Bar { }; - /// - /// using FooBar = Foo; - /// ``` - TemplateArgument, - - /// An edge from a compound type to one of its base member types. For - /// example, the edge from `Bar` to `Foo`: - /// - /// ```C++ - /// class Foo { }; - /// - /// class Bar : public Foo { }; - /// ``` - BaseMember, - - /// An edge from a compound type to the types of one of its fields. For - /// example, the edge from `Foo` to `int`: - /// - /// ```C++ - /// class Foo { - /// int x; - /// }; - /// ``` - Field, - - /// An edge from an class or struct type to an inner type member. For - /// example, the edge from `Foo` to `Foo::Bar` here: - /// - /// ```C++ - /// class Foo { - /// struct Bar { }; - /// }; - /// ``` - InnerType, - - /// An edge from an class or struct type to an inner static variable. For - /// example, the edge from `Foo` to `Foo::BAR` here: - /// - /// ```C++ - /// class Foo { - /// static const char* BAR; - /// }; - /// ``` - InnerVar, - - /// An edge from a class or struct type to one of its method functions. 
For - /// example, the edge from `Foo` to `Foo::bar`: - /// - /// ```C++ - /// class Foo { - /// bool bar(int x, int y); - /// }; - /// ``` - Method, - - /// An edge from a class or struct type to one of its constructor - /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`: - /// - /// ```C++ - /// class Foo { - /// int my_x; - /// int my_y; - /// - /// public: - /// Foo(int x, int y); - /// }; - /// ``` - Constructor, - - /// An edge from a class or struct type to its destructor function. For - /// example, the edge from `Doggo` to `Doggo::~Doggo()`: - /// - /// ```C++ - /// struct Doggo { - /// char* wow; - /// - /// public: - /// ~Doggo(); - /// }; - /// ``` - Destructor, - - /// An edge from a function declaration to its return type. For example, the - /// edge from `foo` to `int`: - /// - /// ```C++ - /// int foo(char* string); - /// ``` - FunctionReturn, - - /// An edge from a function declaration to one of its parameter types. For - /// example, the edge from `foo` to `char*`: - /// - /// ```C++ - /// int foo(char* string); - /// ``` - FunctionParameter, - - /// An edge from a static variable to its type. For example, the edge from - /// `FOO` to `const char*`: - /// - /// ```C++ - /// static const char* FOO; - /// ``` - VarType, - - /// An edge from a non-templated alias or typedef to the referenced type. - TypeReference, -} - -/// A predicate to allow visiting only sub-sets of the whole IR graph by -/// excluding certain edges from being followed by the traversal. -pub trait TraversalPredicate { - /// Should the traversal follow this edge, and visit everything that is - /// reachable through it? 
- fn should_follow(&self, ctx: &BindgenContext, edge: Edge) -> bool; -} - -impl TraversalPredicate for for<'a> fn(&'a BindgenContext, Edge) -> bool { - fn should_follow(&self, ctx: &BindgenContext, edge: Edge) -> bool { - (*self)(ctx, edge) - } -} - -/// A `TraversalPredicate` implementation that follows all edges, and therefore -/// traversals using this predicate will see the whole IR graph reachable from -/// the traversal's roots. -pub fn all_edges(_: &BindgenContext, _: Edge) -> bool { - true -} - -/// A `TraversalPredicate` implementation that only follows -/// `EdgeKind::InnerType` edges, and therefore traversals using this predicate -/// will only visit the traversal's roots and their inner types. This is used -/// in no-recursive-allowlist mode, where inner types such as anonymous -/// structs/unions still need to be processed. -pub fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool { - edge.kind == EdgeKind::InnerType -} - -/// A `TraversalPredicate` implementation that only follows edges to items that -/// are enabled for code generation. This lets us skip considering items for -/// which are not reachable from code generation. -pub fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool { - let cc = &ctx.options().codegen_config; - match edge.kind { - EdgeKind::Generic => { - ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx) - } - - // We statically know the kind of item that non-generic edges can point - // to, so we don't need to actually resolve the item and check - // `Item::is_enabled_for_codegen`. 
- EdgeKind::TemplateParameterDefinition | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::InnerType | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::VarType | - EdgeKind::TypeReference => cc.types(), - EdgeKind::InnerVar => cc.vars(), - EdgeKind::Method => cc.methods(), - EdgeKind::Constructor => cc.constructors(), - EdgeKind::Destructor => cc.destructors(), - } -} - -/// The storage for the set of items that have been seen (although their -/// outgoing edges might not have been fully traversed yet) in an active -/// traversal. -pub trait TraversalStorage<'ctx> { - /// Construct a new instance of this TraversalStorage, for a new traversal. - fn new(ctx: &'ctx BindgenContext) -> Self; - - /// Add the given item to the storage. If the item has never been seen - /// before, return `true`. Otherwise, return `false`. - /// - /// The `from` item is the item from which we discovered this item, or is - /// `None` if this item is a root. - fn add(&mut self, from: Option, item: ItemId) -> bool; -} - -impl<'ctx> TraversalStorage<'ctx> for ItemSet { - fn new(_: &'ctx BindgenContext) -> Self { - ItemSet::new() - } - - fn add(&mut self, _: Option, item: ItemId) -> bool { - self.insert(item) - } -} - -/// A `TraversalStorage` implementation that keeps track of how we first reached -/// each item. This is useful for providing debug assertions with meaningful -/// diagnostic messages about dangling items. 
-#[derive(Debug)] -pub struct Paths<'ctx>(BTreeMap, &'ctx BindgenContext); - -impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> { - fn new(ctx: &'ctx BindgenContext) -> Self { - Paths(BTreeMap::new(), ctx) - } - - fn add(&mut self, from: Option, item: ItemId) -> bool { - let newly_discovered = - self.0.insert(item, from.unwrap_or(item)).is_none(); - - if self.1.resolve_item_fallible(item).is_none() { - let mut path = vec![]; - let mut current = item; - loop { - let predecessor = *self.0.get(¤t).expect( - "We know we found this item id, so it must have a \ - predecessor", - ); - if predecessor == current { - break; - } - path.push(predecessor); - current = predecessor; - } - path.reverse(); - panic!( - "Found reference to dangling id = {:?}\nvia path = {:?}", - item, path - ); - } - - newly_discovered - } -} - -/// The queue of seen-but-not-yet-traversed items. -/// -/// Using a FIFO queue with a traversal will yield a breadth-first traversal, -/// while using a LIFO queue will result in a depth-first traversal of the IR -/// graph. -pub trait TraversalQueue: Default { - /// Add a newly discovered item to the queue. - fn push(&mut self, item: ItemId); - - /// Pop the next item to traverse, if any. - fn next(&mut self) -> Option; -} - -impl TraversalQueue for Vec { - fn push(&mut self, item: ItemId) { - self.push(item); - } - - fn next(&mut self) -> Option { - self.pop() - } -} - -impl TraversalQueue for VecDeque { - fn push(&mut self, item: ItemId) { - self.push_back(item); - } - - fn next(&mut self) -> Option { - self.pop_front() - } -} - -/// Something that can receive edges from a `Trace` implementation. -pub trait Tracer { - /// Note an edge between items. Called from within a `Trace` implementation. - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind); - - /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`. 
- fn visit(&mut self, item: ItemId) { - self.visit_kind(item, EdgeKind::Generic); - } -} - -impl Tracer for F -where - F: FnMut(ItemId, EdgeKind), -{ - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { - (*self)(item, kind) - } -} - -/// Trace all of the outgoing edges to other items. Implementations should call -/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)` -/// for each of their outgoing edges. -pub trait Trace { - /// If a particular type needs extra information beyond what it has in - /// `self` and `context` to find its referenced items, its implementation - /// can define this associated type, forcing callers to pass the needed - /// information through. - type Extra; - - /// Trace all of this item's outgoing edges to other items. - fn trace( - &self, - context: &BindgenContext, - tracer: &mut T, - extra: &Self::Extra, - ) where - T: Tracer; -} - -/// An graph traversal of the transitive closure of references between items. -/// -/// See `BindgenContext::allowlisted_items` for more information. -pub struct ItemTraversal<'ctx, Storage, Queue, Predicate> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, - Predicate: TraversalPredicate, -{ - ctx: &'ctx BindgenContext, - - /// The set of items we have seen thus far in this traversal. - seen: Storage, - - /// The set of items that we have seen, but have yet to traverse. - queue: Queue, - - /// The predicate that determines which edges this traversal will follow. - predicate: Predicate, - - /// The item we are currently traversing. - currently_traversing: Option, -} - -impl<'ctx, Storage, Queue, Predicate> - ItemTraversal<'ctx, Storage, Queue, Predicate> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, - Predicate: TraversalPredicate, -{ - /// Begin a new traversal, starting from the given roots. 
- pub fn new( - ctx: &'ctx BindgenContext, - roots: R, - predicate: Predicate, - ) -> ItemTraversal<'ctx, Storage, Queue, Predicate> - where - R: IntoIterator, - { - let mut seen = Storage::new(ctx); - let mut queue = Queue::default(); - - for id in roots { - seen.add(None, id); - queue.push(id); - } - - ItemTraversal { - ctx, - seen, - queue, - predicate, - currently_traversing: None, - } - } -} - -impl<'ctx, Storage, Queue, Predicate> Tracer - for ItemTraversal<'ctx, Storage, Queue, Predicate> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, - Predicate: TraversalPredicate, -{ - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { - let edge = Edge::new(item, kind); - if !self.predicate.should_follow(self.ctx, edge) { - return; - } - - let is_newly_discovered = - self.seen.add(self.currently_traversing, item); - if is_newly_discovered { - self.queue.push(item) - } - } -} - -impl<'ctx, Storage, Queue, Predicate> Iterator - for ItemTraversal<'ctx, Storage, Queue, Predicate> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, - Predicate: TraversalPredicate, -{ - type Item = ItemId; - - fn next(&mut self) -> Option { - let id = self.queue.next()?; - - let newly_discovered = self.seen.add(None, id); - debug_assert!( - !newly_discovered, - "should have already seen anything we get out of our queue" - ); - debug_assert!( - self.ctx.resolve_item_fallible(id).is_some(), - "should only get IDs of actual items in our context during traversal" - ); - - self.currently_traversing = Some(id); - id.trace(self.ctx, self, &()); - self.currently_traversing = None; - - Some(id) - } -} - -/// An iterator to find any dangling items. -/// -/// See `BindgenContext::assert_no_dangling_item_traversal` for more -/// information. 
-pub type AssertNoDanglingItemsTraversal<'ctx> = ItemTraversal< - 'ctx, - Paths<'ctx>, - VecDeque, - for<'a> fn(&'a BindgenContext, Edge) -> bool, ->; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - #[allow(dead_code)] - fn traversal_predicate_is_object_safe() { - // This should compile only if TraversalPredicate is object safe. - fn takes_by_trait_object(_: &dyn TraversalPredicate) {} - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/ty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/ty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/ty.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/ty.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1250 +0,0 @@ -//! Everything related to types in our intermediate representation. - -use super::comp::CompInfo; -use super::context::{BindgenContext, ItemId, TypeId}; -use super::dot::DotAttributes; -use super::enum_ty::Enum; -use super::function::FunctionSig; -use super::int::IntKind; -use super::item::{IsOpaque, Item}; -use super::layout::{Layout, Opaque}; -use super::objc::ObjCInterface; -use super::template::{ - AsTemplateParam, TemplateInstantiation, TemplateParameters, -}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use crate::clang::{self, Cursor}; -use crate::parse::{ClangItemParser, ParseError, ParseResult}; -use std::borrow::Cow; -use std::io; - -/// The base representation of a type in bindgen. -/// -/// A type has an optional name, which if present cannot be empty, a `layout` -/// (size, alignment and packedness) if known, a `Kind`, which determines which -/// kind of type it is, and whether the type is const. -#[derive(Debug)] -pub struct Type { - /// The name of the type, or None if it was an unnamed struct or union. - name: Option, - /// The layout of the type, if known. - layout: Option, - /// The inner kind of the type - kind: TypeKind, - /// Whether this type is const-qualified. 
- is_const: bool, -} - -/// The maximum number of items in an array for which Rust implements common -/// traits, and so if we have a type containing an array with more than this -/// many items, we won't be able to derive common traits on that type. -/// -pub const RUST_DERIVE_IN_ARRAY_LIMIT: usize = 32; - -impl Type { - /// Get the underlying `CompInfo` for this type, or `None` if this is some - /// other kind of type. - pub fn as_comp(&self) -> Option<&CompInfo> { - match self.kind { - TypeKind::Comp(ref ci) => Some(ci), - _ => None, - } - } - - /// Get the underlying `CompInfo` for this type as a mutable reference, or - /// `None` if this is some other kind of type. - pub fn as_comp_mut(&mut self) -> Option<&mut CompInfo> { - match self.kind { - TypeKind::Comp(ref mut ci) => Some(ci), - _ => None, - } - } - - /// Construct a new `Type`. - pub fn new( - name: Option, - layout: Option, - kind: TypeKind, - is_const: bool, - ) -> Self { - Type { - name, - layout, - kind, - is_const, - } - } - - /// Which kind of type is this? - pub fn kind(&self) -> &TypeKind { - &self.kind - } - - /// Get a mutable reference to this type's kind. - pub fn kind_mut(&mut self) -> &mut TypeKind { - &mut self.kind - } - - /// Get this type's name. - pub fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - /// Whether this is a block pointer type. - pub fn is_block_pointer(&self) -> bool { - matches!(self.kind, TypeKind::BlockPointer(..)) - } - - /// Is this a compound type? - pub fn is_comp(&self) -> bool { - matches!(self.kind, TypeKind::Comp(..)) - } - - /// Is this a union? - pub fn is_union(&self) -> bool { - match self.kind { - TypeKind::Comp(ref comp) => comp.is_union(), - _ => false, - } - } - - /// Is this type of kind `TypeKind::TypeParam`? - pub fn is_type_param(&self) -> bool { - matches!(self.kind, TypeKind::TypeParam) - } - - /// Is this a template instantiation type? 
- pub fn is_template_instantiation(&self) -> bool { - matches!(self.kind, TypeKind::TemplateInstantiation(..)) - } - - /// Is this a template alias type? - pub fn is_template_alias(&self) -> bool { - matches!(self.kind, TypeKind::TemplateAlias(..)) - } - - /// Is this a function type? - pub fn is_function(&self) -> bool { - matches!(self.kind, TypeKind::Function(..)) - } - - /// Is this an enum type? - pub fn is_enum(&self) -> bool { - matches!(self.kind, TypeKind::Enum(..)) - } - - /// Is this either a builtin or named type? - pub fn is_builtin_or_type_param(&self) -> bool { - matches!( - self.kind, - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Function(..) | - TypeKind::Array(..) | - TypeKind::Reference(..) | - TypeKind::Pointer(..) | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::TypeParam - ) - } - - /// Creates a new named type, with name `name`. - pub fn named(name: String) -> Self { - let name = if name.is_empty() { None } else { Some(name) }; - Self::new(name, None, TypeKind::TypeParam, false) - } - - /// Is this a floating point type? - pub fn is_float(&self) -> bool { - matches!(self.kind, TypeKind::Float(..)) - } - - /// Is this a boolean type? - pub fn is_bool(&self) -> bool { - matches!(self.kind, TypeKind::Int(IntKind::Bool)) - } - - /// Is this an integer type? - pub fn is_integer(&self) -> bool { - matches!(self.kind, TypeKind::Int(..)) - } - - /// Cast this type to an integer kind, or `None` if it is not an integer - /// type. - pub fn as_integer(&self) -> Option { - match self.kind { - TypeKind::Int(int_kind) => Some(int_kind), - _ => None, - } - } - - /// Is this a `const` qualified type? - pub fn is_const(&self) -> bool { - self.is_const - } - - /// Is this a reference to another type? - pub fn is_type_ref(&self) -> bool { - matches!( - self.kind, - TypeKind::ResolvedTypeRef(_) | TypeKind::UnresolvedTypeRef(_, _, _) - ) - } - - /// Is this an unresolved reference? 
- pub fn is_unresolved_ref(&self) -> bool { - matches!(self.kind, TypeKind::UnresolvedTypeRef(_, _, _)) - } - - /// Is this a incomplete array type? - pub fn is_incomplete_array(&self, ctx: &BindgenContext) -> Option { - match self.kind { - TypeKind::Array(item, len) => { - if len == 0 { - Some(item.into()) - } else { - None - } - } - TypeKind::ResolvedTypeRef(inner) => { - ctx.resolve_type(inner).is_incomplete_array(ctx) - } - _ => None, - } - } - - /// What is the layout of this type? - pub fn layout(&self, ctx: &BindgenContext) -> Option { - self.layout.or_else(|| { - match self.kind { - TypeKind::Comp(ref ci) => ci.layout(ctx), - TypeKind::Array(inner, length) if length == 0 => Some( - Layout::new(0, ctx.resolve_type(inner).layout(ctx)?.align), - ), - // FIXME(emilio): This is a hack for anonymous union templates. - // Use the actual pointer size! - TypeKind::Pointer(..) => Some(Layout::new( - ctx.target_pointer_size(), - ctx.target_pointer_size(), - )), - TypeKind::ResolvedTypeRef(inner) => { - ctx.resolve_type(inner).layout(ctx) - } - _ => None, - } - }) - } - - /// Whether this named type is an invalid C++ identifier. This is done to - /// avoid generating invalid code with some cases we can't handle, see: - /// - /// tests/headers/381-decltype-alias.hpp - pub fn is_invalid_type_param(&self) -> bool { - match self.kind { - TypeKind::TypeParam => { - let name = self.name().expect("Unnamed named type?"); - !clang::is_valid_identifier(name) - } - _ => false, - } - } - - /// Takes `name`, and returns a suitable identifier representation for it. - fn sanitize_name(name: &str) -> Cow { - if clang::is_valid_identifier(name) { - return Cow::Borrowed(name); - } - - let name = name.replace(|c| c == ' ' || c == ':' || c == '.', "_"); - Cow::Owned(name) - } - - /// Get this type's santizied name. 
- pub fn sanitized_name<'a>( - &'a self, - ctx: &BindgenContext, - ) -> Option> { - let name_info = match *self.kind() { - TypeKind::Pointer(inner) => Some((inner, Cow::Borrowed("ptr"))), - TypeKind::Reference(inner) => Some((inner, Cow::Borrowed("ref"))), - TypeKind::Array(inner, length) => { - Some((inner, format!("array{}", length).into())) - } - _ => None, - }; - if let Some((inner, prefix)) = name_info { - ctx.resolve_item(inner) - .expect_type() - .sanitized_name(ctx) - .map(|name| format!("{}_{}", prefix, name).into()) - } else { - self.name().map(Self::sanitize_name) - } - } - - /// See safe_canonical_type. - pub fn canonical_type<'tr>( - &'tr self, - ctx: &'tr BindgenContext, - ) -> &'tr Type { - self.safe_canonical_type(ctx) - .expect("Should have been resolved after parsing!") - } - - /// Returns the canonical type of this type, that is, the "inner type". - /// - /// For example, for a `typedef`, the canonical type would be the - /// `typedef`ed type, for a template instantiation, would be the template - /// its specializing, and so on. Return None if the type is unresolved. - pub fn safe_canonical_type<'tr>( - &'tr self, - ctx: &'tr BindgenContext, - ) -> Option<&'tr Type> { - match self.kind { - TypeKind::TypeParam | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Comp(..) | - TypeKind::Opaque | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Pointer(..) | - TypeKind::BlockPointer(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::ObjCInterface(..) 
=> Some(self), - - TypeKind::ResolvedTypeRef(inner) | - TypeKind::Alias(inner) | - TypeKind::TemplateAlias(inner, _) => { - ctx.resolve_type(inner).safe_canonical_type(ctx) - } - TypeKind::TemplateInstantiation(ref inst) => ctx - .resolve_type(inst.template_definition()) - .safe_canonical_type(ctx), - - TypeKind::UnresolvedTypeRef(..) => None, - } - } - - /// There are some types we don't want to stop at when finding an opaque - /// item, so we can arrive to the proper item that needs to be generated. - pub fn should_be_traced_unconditionally(&self) -> bool { - matches!( - self.kind, - TypeKind::Comp(..) | - TypeKind::Function(..) | - TypeKind::Pointer(..) | - TypeKind::Array(..) | - TypeKind::Reference(..) | - TypeKind::TemplateInstantiation(..) | - TypeKind::ResolvedTypeRef(..) - ) - } -} - -impl IsOpaque for Type { - type Extra = Item; - - fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { - match self.kind { - TypeKind::Opaque => true, - TypeKind::TemplateInstantiation(ref inst) => { - inst.is_opaque(ctx, item) - } - TypeKind::Comp(ref comp) => comp.is_opaque(ctx, &self.layout), - TypeKind::ResolvedTypeRef(to) => to.is_opaque(ctx, &()), - _ => false, - } - } -} - -impl AsTemplateParam for Type { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - self.kind.as_template_param(ctx, item) - } -} - -impl AsTemplateParam for TypeKind { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - match *self { - TypeKind::TypeParam => Some(item.id().expect_type_id(ctx)), - TypeKind::ResolvedTypeRef(id) => id.as_template_param(ctx, &()), - _ => None, - } - } -} - -impl DotAttributes for Type { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if let Some(ref layout) = self.layout { - writeln!( - out, - "size{} - align{}", - layout.size, layout.align - )?; - if layout.packed 
{ - writeln!(out, "packedtrue")?; - } - } - - if self.is_const { - writeln!(out, "consttrue")?; - } - - self.kind.dot_attributes(ctx, out) - } -} - -impl DotAttributes for TypeKind { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "type kind{}", - self.kind_name() - )?; - - if let TypeKind::Comp(ref comp) = *self { - comp.dot_attributes(ctx, out)?; - } - - Ok(()) - } -} - -impl TypeKind { - fn kind_name(&self) -> &'static str { - match *self { - TypeKind::Void => "Void", - TypeKind::NullPtr => "NullPtr", - TypeKind::Comp(..) => "Comp", - TypeKind::Opaque => "Opaque", - TypeKind::Int(..) => "Int", - TypeKind::Float(..) => "Float", - TypeKind::Complex(..) => "Complex", - TypeKind::Alias(..) => "Alias", - TypeKind::TemplateAlias(..) => "TemplateAlias", - TypeKind::Array(..) => "Array", - TypeKind::Vector(..) => "Vector", - TypeKind::Function(..) => "Function", - TypeKind::Enum(..) => "Enum", - TypeKind::Pointer(..) => "Pointer", - TypeKind::BlockPointer(..) => "BlockPointer", - TypeKind::Reference(..) => "Reference", - TypeKind::TemplateInstantiation(..) => "TemplateInstantiation", - TypeKind::UnresolvedTypeRef(..) => "UnresolvedTypeRef", - TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef", - TypeKind::TypeParam => "TypeParam", - TypeKind::ObjCInterface(..) 
=> "ObjCInterface", - TypeKind::ObjCId => "ObjCId", - TypeKind::ObjCSel => "ObjCSel", - } - } -} - -#[test] -fn is_invalid_type_param_valid() { - let ty = Type::new(Some("foo".into()), None, TypeKind::TypeParam, false); - assert!(!ty.is_invalid_type_param()) -} - -#[test] -fn is_invalid_type_param_valid_underscore_and_numbers() { - let ty = Type::new( - Some("_foo123456789_".into()), - None, - TypeKind::TypeParam, - false, - ); - assert!(!ty.is_invalid_type_param()) -} - -#[test] -fn is_invalid_type_param_valid_unnamed_kind() { - let ty = Type::new(Some("foo".into()), None, TypeKind::Void, false); - assert!(!ty.is_invalid_type_param()) -} - -#[test] -fn is_invalid_type_param_invalid_start() { - let ty = Type::new(Some("1foo".into()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()) -} - -#[test] -fn is_invalid_type_param_invalid_remaing() { - let ty = Type::new(Some("foo-".into()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()) -} - -#[test] -#[should_panic] -fn is_invalid_type_param_unnamed() { - let ty = Type::new(None, None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()) -} - -#[test] -fn is_invalid_type_param_empty_name() { - let ty = Type::new(Some("".into()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()) -} - -impl TemplateParameters for Type { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - self.kind.self_template_params(ctx) - } -} - -impl TemplateParameters for TypeKind { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - match *self { - TypeKind::ResolvedTypeRef(id) => { - ctx.resolve_type(id).self_template_params(ctx) - } - TypeKind::Comp(ref comp) => comp.self_template_params(ctx), - TypeKind::TemplateAlias(_, ref args) => args.clone(), - - TypeKind::Opaque | - TypeKind::TemplateInstantiation(..) 
| - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(_) | - TypeKind::Float(_) | - TypeKind::Complex(_) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Function(_) | - TypeKind::Enum(_) | - TypeKind::Pointer(_) | - TypeKind::BlockPointer(_) | - TypeKind::Reference(_) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::TypeParam | - TypeKind::Alias(_) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::ObjCInterface(_) => vec![], - } - } -} - -/// The kind of float this type represents. -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum FloatKind { - /// A `float`. - Float, - /// A `double`. - Double, - /// A `long double`. - LongDouble, - /// A `__float128`. - Float128, -} - -/// The different kinds of types that we can parse. -#[derive(Debug)] -pub enum TypeKind { - /// The void type. - Void, - - /// The `nullptr_t` type. - NullPtr, - - /// A compound type, that is, a class, struct, or union. - Comp(CompInfo), - - /// An opaque type that we just don't understand. All usage of this shoulf - /// result in an opaque blob of bytes generated from the containing type's - /// layout. - Opaque, - - /// An integer type, of a given kind. `bool` and `char` are also considered - /// integers. - Int(IntKind), - - /// A floating point type. - Float(FloatKind), - - /// A complex floating point type. - Complex(FloatKind), - - /// A type alias, with a name, that points to another type. - Alias(TypeId), - - /// A templated alias, pointing to an inner type, just as `Alias`, but with - /// template parameters. - TemplateAlias(TypeId, Vec), - - /// A packed vector type: element type, number of elements - Vector(TypeId, usize), - - /// An array of a type and a length. - Array(TypeId, usize), - - /// A function type, with a given signature. - Function(FunctionSig), - - /// An `enum` type. - Enum(Enum), - - /// A pointer to a type. The bool field represents whether it's const or - /// not. - Pointer(TypeId), - - /// A pointer to an Apple block. 
- BlockPointer(TypeId), - - /// A reference to a type, as in: int& foo(). - Reference(TypeId), - - /// An instantiation of an abstract template definition with a set of - /// concrete template arguments. - TemplateInstantiation(TemplateInstantiation), - - /// A reference to a yet-to-resolve type. This stores the clang cursor - /// itself, and postpones its resolution. - /// - /// These are gone in a phase after parsing where these are mapped to - /// already known types, and are converted to ResolvedTypeRef. - /// - /// see tests/headers/typeref.hpp to see somewhere where this is a problem. - UnresolvedTypeRef( - clang::Type, - clang::Cursor, - /* parent_id */ - Option, - ), - - /// An indirection to another type. - /// - /// These are generated after we resolve a forward declaration, or when we - /// replace one type with another. - ResolvedTypeRef(TypeId), - - /// A named type, that is, a template parameter. - TypeParam, - - /// Objective C interface. Always referenced through a pointer - ObjCInterface(ObjCInterface), - - /// Objective C 'id' type, points to any object - ObjCId, - - /// Objective C selector type - ObjCSel, -} - -impl Type { - /// This is another of the nasty methods. This one is the one that takes - /// care of the core logic of converting a clang type to a `Type`. - /// - /// It's sort of nasty and full of special-casing, but hopefully the - /// comments in every special case justify why they're there. 
- pub fn from_clang_ty( - potential_id: ItemId, - ty: &clang::Type, - location: Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - { - let already_resolved = ctx.builtin_or_resolved_ty( - potential_id, - parent_id, - ty, - Some(location), - ); - if let Some(ty) = already_resolved { - debug!("{:?} already resolved: {:?}", ty, location); - return Ok(ParseResult::AlreadyResolved(ty.into())); - } - } - - let layout = ty.fallible_layout(ctx).ok(); - let cursor = ty.declaration(); - let mut name = cursor.spelling(); - - debug!( - "from_clang_ty: {:?}, ty: {:?}, loc: {:?}", - potential_id, ty, location - ); - debug!("currently_parsed_types: {:?}", ctx.currently_parsed_types()); - - let canonical_ty = ty.canonical_type(); - - // Parse objc protocols as if they were interfaces - let mut ty_kind = ty.kind(); - match location.kind() { - CXCursor_ObjCProtocolDecl | CXCursor_ObjCCategoryDecl => { - ty_kind = CXType_ObjCInterface - } - _ => {} - } - - // Objective C template type parameter - // FIXME: This is probably wrong, we are attempting to find the - // objc template params, which seem to manifest as a typedef. - // We are rewriting them as id to suppress multiple conflicting - // typedefs at root level - if ty_kind == CXType_Typedef { - let is_template_type_param = - ty.declaration().kind() == CXCursor_TemplateTypeParameter; - let is_canonical_objcpointer = - canonical_ty.kind() == CXType_ObjCObjectPointer; - - // We have found a template type for objc interface - if is_canonical_objcpointer && is_template_type_param { - // Objective-C generics are just ids with fancy name. - // To keep it simple, just name them ids - name = "id".to_owned(); - } - } - - if location.kind() == CXCursor_ClassTemplatePartialSpecialization { - // Sorry! (Not sorry) - warn!( - "Found a partial template specialization; bindgen does not \ - support partial template specialization! Constructing \ - opaque type instead." 
- ); - return Ok(ParseResult::New( - Opaque::from_clang_ty(&canonical_ty, ctx), - None, - )); - } - - let kind = if location.kind() == CXCursor_TemplateRef || - (ty.template_args().is_some() && ty_kind != CXType_Typedef) - { - // This is a template instantiation. - match TemplateInstantiation::from_ty(ty, ctx) { - Some(inst) => TypeKind::TemplateInstantiation(inst), - None => TypeKind::Opaque, - } - } else { - match ty_kind { - CXType_Unexposed - if *ty != canonical_ty && - canonical_ty.kind() != CXType_Invalid && - ty.ret_type().is_none() && - // Sometime clang desugars some types more than - // what we need, specially with function - // pointers. - // - // We should also try the solution of inverting - // those checks instead of doing this, that is, - // something like: - // - // CXType_Unexposed if ty.ret_type().is_some() - // => { ... } - // - // etc. - !canonical_ty.spelling().contains("type-parameter") => - { - debug!("Looking for canonical type: {:?}", canonical_ty); - return Self::from_clang_ty( - potential_id, - &canonical_ty, - location, - parent_id, - ctx, - ); - } - CXType_Unexposed | CXType_Invalid => { - // For some reason Clang doesn't give us any hint in some - // situations where we should generate a function pointer (see - // tests/headers/func_ptr_in_struct.h), so we do a guess here - // trying to see if it has a valid return type. - if ty.ret_type().is_some() { - let signature = - FunctionSig::from_ty(ty, &location, ctx)?; - TypeKind::Function(signature) - // Same here, with template specialisations we can safely - // assume this is a Comp(..) 
- } else if ty.is_fully_instantiated_template() { - debug!( - "Template specialization: {:?}, {:?} {:?}", - ty, location, canonical_ty - ); - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ) - .expect("C'mon"); - TypeKind::Comp(complex) - } else { - match location.kind() { - CXCursor_CXXBaseSpecifier | - CXCursor_ClassTemplate => { - if location.kind() == CXCursor_CXXBaseSpecifier - { - // In the case we're parsing a base specifier - // inside an unexposed or invalid type, it means - // that we're parsing one of two things: - // - // * A template parameter. - // * A complex class that isn't exposed. - // - // This means, unfortunately, that there's no - // good way to differentiate between them. - // - // Probably we could try to look at the - // declaration and complicate more this logic, - // but we'll keep it simple... if it's a valid - // C++ identifier, we'll consider it as a - // template parameter. - // - // This is because: - // - // * We expect every other base that is a - // proper identifier (that is, a simple - // struct/union declaration), to be exposed, - // so this path can't be reached in that - // case. - // - // * Quite conveniently, complex base - // specifiers preserve their full names (that - // is: Foo instead of Foo). We can take - // advantage of this. - // - // If we find some edge case where this doesn't - // work (which I guess is unlikely, see the - // different test cases[1][2][3][4]), we'd need - // to find more creative ways of differentiating - // these two cases. 
- // - // [1]: inherit_named.hpp - // [2]: forward-inherit-struct-with-fields.hpp - // [3]: forward-inherit-struct.hpp - // [4]: inherit-namespaced.hpp - if location.spelling().chars().all(|c| { - c.is_alphanumeric() || c == '_' - }) { - return Err(ParseError::Recurse); - } - } else { - name = location.spelling(); - } - - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ); - match complex { - Ok(complex) => TypeKind::Comp(complex), - Err(_) => { - warn!( - "Could not create complex type \ - from class template or base \ - specifier, using opaque blob" - ); - let opaque = - Opaque::from_clang_ty(ty, ctx); - return Ok(ParseResult::New( - opaque, None, - )); - } - } - } - CXCursor_TypeAliasTemplateDecl => { - debug!("TypeAliasTemplateDecl"); - - // We need to manually unwind this one. - let mut inner = Err(ParseError::Continue); - let mut args = vec![]; - - location.visit(|cur| { - match cur.kind() { - CXCursor_TypeAliasDecl => { - let current = cur.cur_type(); - - debug_assert_eq!( - current.kind(), - CXType_Typedef - ); - - name = current.spelling(); - - let inner_ty = cur - .typedef_type() - .expect("Not valid Type?"); - inner = Ok(Item::from_ty_or_ref( - inner_ty, - cur, - Some(potential_id), - ctx, - )); - } - CXCursor_TemplateTypeParameter => { - let param = Item::type_param( - None, cur, ctx, - ) - .expect( - "Item::type_param shouldn't \ - ever fail if we are looking \ - at a TemplateTypeParameter", - ); - args.push(param); - } - _ => {} - } - CXChildVisit_Continue - }); - - let inner_type = match inner { - Ok(inner) => inner, - Err(..) 
=> { - warn!( - "Failed to parse template alias \ - {:?}", - location - ); - return Err(ParseError::Continue); - } - }; - - TypeKind::TemplateAlias(inner_type, args) - } - CXCursor_TemplateRef => { - let referenced = location.referenced().unwrap(); - let referenced_ty = referenced.cur_type(); - - debug!( - "TemplateRef: location = {:?}; referenced = \ - {:?}; referenced_ty = {:?}", - location, - referenced, - referenced_ty - ); - - return Self::from_clang_ty( - potential_id, - &referenced_ty, - referenced, - parent_id, - ctx, - ); - } - CXCursor_TypeRef => { - let referenced = location.referenced().unwrap(); - let referenced_ty = referenced.cur_type(); - let declaration = referenced_ty.declaration(); - - debug!( - "TypeRef: location = {:?}; referenced = \ - {:?}; referenced_ty = {:?}", - location, referenced, referenced_ty - ); - - let id = Item::from_ty_or_ref_with_id( - potential_id, - referenced_ty, - declaration, - parent_id, - ctx, - ); - return Ok(ParseResult::AlreadyResolved( - id.into(), - )); - } - CXCursor_NamespaceRef => { - return Err(ParseError::Continue); - } - _ => { - if ty.kind() == CXType_Unexposed { - warn!( - "Unexposed type {:?}, recursing inside, \ - loc: {:?}", - ty, - location - ); - return Err(ParseError::Recurse); - } - - warn!("invalid type {:?}", ty); - return Err(ParseError::Continue); - } - } - } - } - CXType_Auto => { - if canonical_ty == *ty { - debug!("Couldn't find deduced type: {:?}", ty); - return Err(ParseError::Continue); - } - - return Self::from_clang_ty( - potential_id, - &canonical_ty, - location, - parent_id, - ctx, - ); - } - // NOTE: We don't resolve pointers eagerly because the pointee type - // might not have been parsed, and if it contains templates or - // something else we might get confused, see the comment inside - // TypeRef. - // - // We might need to, though, if the context is already in the - // process of resolving them. 
- CXType_ObjCObjectPointer | - CXType_MemberPointer | - CXType_Pointer => { - let pointee = ty.pointee_type().unwrap(); - let inner = - Item::from_ty_or_ref(pointee, location, None, ctx); - TypeKind::Pointer(inner) - } - CXType_BlockPointer => { - let pointee = ty.pointee_type().expect("Not valid Type?"); - let inner = - Item::from_ty_or_ref(pointee, location, None, ctx); - TypeKind::BlockPointer(inner) - } - // XXX: RValueReference is most likely wrong, but I don't think we - // can even add bindings for that, so huh. - CXType_RValueReference | CXType_LValueReference => { - let inner = Item::from_ty_or_ref( - ty.pointee_type().unwrap(), - location, - None, - ctx, - ); - TypeKind::Reference(inner) - } - // XXX DependentSizedArray is wrong - CXType_VariableArray | CXType_DependentSizedArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Pointer(inner) - } - CXType_IncompleteArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Array(inner, 0) - } - CXType_FunctionNoProto | CXType_FunctionProto => { - let signature = FunctionSig::from_ty(ty, &location, ctx)?; - TypeKind::Function(signature) - } - CXType_Typedef => { - let inner = cursor.typedef_type().expect("Not valid Type?"); - let inner = - Item::from_ty_or_ref(inner, location, None, ctx); - if inner == potential_id { - warn!( - "Generating oqaque type instead of self-referential \ - typedef"); - // This can happen if we bail out of recursive situations - // within the clang parsing. 
- TypeKind::Opaque - } else { - TypeKind::Alias(inner) - } - } - CXType_Enum => { - let enum_ = Enum::from_ty(ty, ctx).expect("Not an enum?"); - - if name.is_empty() { - let pretty_name = ty.spelling(); - if clang::is_valid_identifier(&pretty_name) { - name = pretty_name; - } - } - - TypeKind::Enum(enum_) - } - CXType_Record => { - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ) - .expect("Not a complex type?"); - - if name.is_empty() { - // The pretty-printed name may contain typedefed name, - // but may also be "struct (anonymous at .h:1)" - let pretty_name = ty.spelling(); - if clang::is_valid_identifier(&pretty_name) { - name = pretty_name; - } - } - - TypeKind::Comp(complex) - } - CXType_Vector => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve vector element?"); - TypeKind::Vector(inner, ty.num_elements().unwrap()) - } - CXType_ConstantArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Array(inner, ty.num_elements().unwrap()) - } - CXType_Elaborated => { - return Self::from_clang_ty( - potential_id, - &ty.named(), - location, - parent_id, - ctx, - ); - } - CXType_ObjCId => TypeKind::ObjCId, - CXType_ObjCSel => TypeKind::ObjCSel, - CXType_ObjCClass | CXType_ObjCInterface => { - let interface = ObjCInterface::from_ty(&location, ctx) - .expect("Not a valid objc interface?"); - name = interface.rust_name(); - TypeKind::ObjCInterface(interface) - } - CXType_Dependent => { - return Err(ParseError::Continue); - } - _ => { - warn!( - "unsupported type: kind = {:?}; ty = {:?}; at {:?}", - ty.kind(), - ty, - location - ); - return Err(ParseError::Continue); - } - } - }; - - let name = if name.is_empty() { None } else { Some(name) }; - - let is_const = ty.is_const() || - (ty.kind() == CXType_ConstantArray && - ty.elem_type() - 
.map_or(false, |element| element.is_const())); - - let ty = Type::new(name, layout, kind, is_const); - // TODO: maybe declaration.canonical()? - Ok(ParseResult::New(ty, Some(cursor.canonical()))) - } -} - -impl Trace for Type { - type Extra = Item; - - fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) - where - T: Tracer, - { - match *self.kind() { - TypeKind::Pointer(inner) | - TypeKind::Reference(inner) | - TypeKind::Array(inner, _) | - TypeKind::Vector(inner, _) | - TypeKind::BlockPointer(inner) | - TypeKind::Alias(inner) | - TypeKind::ResolvedTypeRef(inner) => { - tracer.visit_kind(inner.into(), EdgeKind::TypeReference); - } - TypeKind::TemplateAlias(inner, ref template_params) => { - tracer.visit_kind(inner.into(), EdgeKind::TypeReference); - for param in template_params { - tracer.visit_kind( - param.into(), - EdgeKind::TemplateParameterDefinition, - ); - } - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.trace(context, tracer, &()); - } - TypeKind::Comp(ref ci) => ci.trace(context, tracer, item), - TypeKind::Function(ref sig) => sig.trace(context, tracer, &()), - TypeKind::Enum(ref en) => { - if let Some(repr) = en.repr() { - tracer.visit(repr.into()); - } - } - TypeKind::UnresolvedTypeRef(_, _, Some(id)) => { - tracer.visit(id); - } - - TypeKind::ObjCInterface(ref interface) => { - interface.trace(context, tracer, &()); - } - - // None of these variants have edges to other items and types. 
- TypeKind::Opaque | - TypeKind::UnresolvedTypeRef(_, _, None) | - TypeKind::TypeParam | - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(_) | - TypeKind::Float(_) | - TypeKind::Complex(_) | - TypeKind::ObjCId | - TypeKind::ObjCSel => {} - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/var.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/var.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/var.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/var.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,455 +0,0 @@ -//! Intermediate representation of variables. - -use super::super::codegen::MacroTypeVariation; -use super::context::{BindgenContext, TypeId}; -use super::dot::DotAttributes; -use super::function::cursor_mangling; -use super::int::IntKind; -use super::item::Item; -use super::ty::{FloatKind, TypeKind}; -use crate::callbacks::MacroParsingBehavior; -use crate::clang; -use crate::clang::ClangToken; -use crate::parse::{ - ClangItemParser, ClangSubItemParser, ParseError, ParseResult, -}; -use cexpr; -use std::io; -use std::num::Wrapping; - -/// The type for a constant variable. -#[derive(Debug)] -pub enum VarType { - /// A boolean. - Bool(bool), - /// An integer. - Int(i64), - /// A floating point number. - Float(f64), - /// A character. - Char(u8), - /// A string, not necessarily well-formed utf-8. - String(Vec), -} - -/// A `Var` is our intermediate representation of a variable. -#[derive(Debug)] -pub struct Var { - /// The name of the variable. - name: String, - /// The mangled name of the variable. - mangled_name: Option, - /// The type of the variable. - ty: TypeId, - /// The value of the variable, that needs to be suitable for `ty`. - val: Option, - /// Whether this variable is const. - is_const: bool, -} - -impl Var { - /// Construct a new `Var`. 
- pub fn new( - name: String, - mangled_name: Option, - ty: TypeId, - val: Option, - is_const: bool, - ) -> Var { - assert!(!name.is_empty()); - Var { - name, - mangled_name, - ty, - val, - is_const, - } - } - - /// Is this variable `const` qualified? - pub fn is_const(&self) -> bool { - self.is_const - } - - /// The value of this constant variable, if any. - pub fn val(&self) -> Option<&VarType> { - self.val.as_ref() - } - - /// Get this variable's type. - pub fn ty(&self) -> TypeId { - self.ty - } - - /// Get this variable's name. - pub fn name(&self) -> &str { - &self.name - } - - /// Get this variable's mangled name. - pub fn mangled_name(&self) -> Option<&str> { - self.mangled_name.as_deref() - } -} - -impl DotAttributes for Var { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if self.is_const { - writeln!(out, "consttrue")?; - } - - if let Some(ref mangled) = self.mangled_name { - writeln!( - out, - "mangled name{}", - mangled - )?; - } - - Ok(()) - } -} - -fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind { - if value < 0 || - ctx.options().default_macro_constant_type == - MacroTypeVariation::Signed - { - if value < i32::min_value() as i64 || value > i32::max_value() as i64 { - IntKind::I64 - } else if !ctx.options().fit_macro_constants || - value < i16::min_value() as i64 || - value > i16::max_value() as i64 - { - IntKind::I32 - } else if value < i8::min_value() as i64 || - value > i8::max_value() as i64 - { - IntKind::I16 - } else { - IntKind::I8 - } - } else if value > u32::max_value() as i64 { - IntKind::U64 - } else if !ctx.options().fit_macro_constants || - value > u16::max_value() as i64 - { - IntKind::U32 - } else if value > u8::max_value() as i64 { - IntKind::U16 - } else { - IntKind::U8 - } -} - -/// Determines whether a set of tokens from a CXCursor_MacroDefinition -/// represent a function-like macro. 
If so, calls the func_macro callback -/// and returns `Err(ParseError::Continue)` to signal to skip further -/// processing. If conversion to UTF-8 fails (it is performed only where it -/// should be infallible), then `Err(ParseError::Continue)` is returned as well. -fn handle_function_macro( - cursor: &clang::Cursor, - tokens: &[ClangToken], - callbacks: &dyn crate::callbacks::ParseCallbacks, -) -> Result<(), ParseError> { - // TODO: Hoist the `is_macro_function_like` check into this function's - // caller, and thus avoid allocating the `tokens` vector for non-functional - // macros. - let is_functional_macro = cursor.is_macro_function_like(); - - if !is_functional_macro { - return Ok(()); - } - - let is_closing_paren = |t: &ClangToken| { - // Test cheap token kind before comparing exact spellings. - t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")" - }; - let boundary = tokens.iter().position(is_closing_paren); - - let mut spelled = tokens.iter().map(ClangToken::spelling); - // Add 1, to convert index to length. - let left = spelled - .by_ref() - .take(boundary.ok_or(ParseError::Continue)? + 1); - let left = left.collect::>().concat(); - let left = String::from_utf8(left).map_err(|_| ParseError::Continue)?; - let right = spelled; - // Drop last token with LLVM < 4.0, due to an LLVM bug. - // - // See: - // https://bugs.llvm.org//show_bug.cgi?id=9069 - let len = match (right.len(), crate::clang_version().parsed) { - (len, Some((v, _))) if len > 0 && v < 4 => len - 1, - (len, _) => len, - }; - let right: Vec<_> = right.take(len).collect(); - callbacks.func_macro(&left, &right); - - // We handled the macro, skip future macro processing. 
- Err(ParseError::Continue) -} - -impl ClangSubItemParser for Var { - fn parse( - cursor: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use cexpr::expr::EvalResult; - use cexpr::literal::CChar; - use clang_sys::*; - match cursor.kind() { - CXCursor_MacroDefinition => { - let tokens: Vec<_> = cursor.tokens().iter().collect(); - - if let Some(callbacks) = ctx.parse_callbacks() { - match callbacks.will_parse_macro(&cursor.spelling()) { - MacroParsingBehavior::Ignore => { - return Err(ParseError::Continue); - } - MacroParsingBehavior::Default => {} - } - - handle_function_macro(&cursor, &tokens, callbacks)?; - } - - let value = parse_macro(ctx, &tokens); - - let (id, value) = match value { - Some(v) => v, - None => return Err(ParseError::Continue), - }; - - assert!(!id.is_empty(), "Empty macro name?"); - - let previously_defined = ctx.parsed_macro(&id); - - // NB: It's important to "note" the macro even if the result is - // not an integer, otherwise we might loose other kind of - // derived macros. - ctx.note_parsed_macro(id.clone(), value.clone()); - - if previously_defined { - let name = String::from_utf8(id).unwrap(); - warn!("Duplicated macro definition: {}", name); - return Err(ParseError::Continue); - } - - // NOTE: Unwrapping, here and above, is safe, because the - // identifier of a token comes straight from clang, and we - // enforce utf8 there, so we should have already panicked at - // this point. 
- let name = String::from_utf8(id).unwrap(); - let (type_kind, val) = match value { - EvalResult::Invalid => return Err(ParseError::Continue), - EvalResult::Float(f) => { - (TypeKind::Float(FloatKind::Double), VarType::Float(f)) - } - EvalResult::Char(c) => { - let c = match c { - CChar::Char(c) => { - assert_eq!(c.len_utf8(), 1); - c as u8 - } - CChar::Raw(c) => { - assert!(c <= ::std::u8::MAX as u64); - c as u8 - } - }; - - (TypeKind::Int(IntKind::U8), VarType::Char(c)) - } - EvalResult::Str(val) => { - let char_ty = Item::builtin_type( - TypeKind::Int(IntKind::U8), - true, - ctx, - ); - if let Some(callbacks) = ctx.parse_callbacks() { - callbacks.str_macro(&name, &val); - } - (TypeKind::Pointer(char_ty), VarType::String(val)) - } - EvalResult::Int(Wrapping(value)) => { - let kind = ctx - .parse_callbacks() - .and_then(|c| c.int_macro(&name, value)) - .unwrap_or_else(|| { - default_macro_constant_type(ctx, value) - }); - - (TypeKind::Int(kind), VarType::Int(value)) - } - }; - - let ty = Item::builtin_type(type_kind, true, ctx); - - Ok(ParseResult::New( - Var::new(name, None, ty, Some(val), true), - Some(cursor), - )) - } - CXCursor_VarDecl => { - let name = cursor.spelling(); - if name.is_empty() { - warn!("Empty constant name?"); - return Err(ParseError::Continue); - } - - let ty = cursor.cur_type(); - - // TODO(emilio): do we have to special-case constant arrays in - // some other places? - let is_const = ty.is_const() || - (ty.kind() == CXType_ConstantArray && - ty.elem_type() - .map_or(false, |element| element.is_const())); - - let ty = match Item::from_ty(&ty, cursor, None, ctx) { - Ok(ty) => ty, - Err(e) => { - assert_eq!( - ty.kind(), - CXType_Auto, - "Couldn't resolve constant type, and it \ - wasn't an nondeductible auto type!" - ); - return Err(e); - } - }; - - // Note: Ty might not be totally resolved yet, see - // tests/headers/inner_const.hpp - // - // That's fine because in that case we know it's not a literal. 
- let canonical_ty = ctx - .safe_resolve_type(ty) - .and_then(|t| t.safe_canonical_type(ctx)); - - let is_integer = canonical_ty.map_or(false, |t| t.is_integer()); - let is_float = canonical_ty.map_or(false, |t| t.is_float()); - - // TODO: We could handle `char` more gracefully. - // TODO: Strings, though the lookup is a bit more hard (we need - // to look at the canonical type of the pointee too, and check - // is char, u8, or i8 I guess). - let value = if is_integer { - let kind = match *canonical_ty.unwrap().kind() { - TypeKind::Int(kind) => kind, - _ => unreachable!(), - }; - - let mut val = cursor.evaluate().and_then(|v| v.as_int()); - if val.is_none() || !kind.signedness_matches(val.unwrap()) { - let tu = ctx.translation_unit(); - val = get_integer_literal_from_cursor(&cursor, tu); - } - - val.map(|val| { - if kind == IntKind::Bool { - VarType::Bool(val != 0) - } else { - VarType::Int(val) - } - }) - } else if is_float { - cursor - .evaluate() - .and_then(|v| v.as_double()) - .map(VarType::Float) - } else { - cursor - .evaluate() - .and_then(|v| v.as_literal_string()) - .map(VarType::String) - }; - - let mangling = cursor_mangling(ctx, &cursor); - let var = Var::new(name, mangling, ty, value, is_const); - - Ok(ParseResult::New(var, Some(cursor))) - } - _ => { - /* TODO */ - Err(ParseError::Continue) - } - } - } -} - -/// Try and parse a macro using all the macros parsed until now. -fn parse_macro( - ctx: &BindgenContext, - tokens: &[ClangToken], -) -> Option<(Vec, cexpr::expr::EvalResult)> { - use cexpr::expr; - - let mut cexpr_tokens: Vec<_> = tokens - .iter() - .filter_map(ClangToken::as_cexpr_token) - .collect(); - - let parser = expr::IdentifierParser::new(ctx.parsed_macros()); - - if let Ok((_, (id, val))) = parser.macro_definition(&cexpr_tokens) { - return Some((id.into(), val)); - } - - // Try without the last token, to workaround a libclang bug in versions - // previous to 4.0. 
- // - // See: - // https://bugs.llvm.org//show_bug.cgi?id=9069 - // https://reviews.llvm.org/D26446 - cexpr_tokens.pop()?; - - match parser.macro_definition(&cexpr_tokens) { - Ok((_, (id, val))) => Some((id.into(), val)), - _ => None, - } -} - -fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option { - use cexpr::expr; - use cexpr::expr::EvalResult; - - let cexpr_tokens = cursor.cexpr_tokens(); - - // TODO(emilio): We can try to parse other kinds of literals. - match expr::expr(&cexpr_tokens) { - Ok((_, EvalResult::Int(Wrapping(val)))) => Some(val), - _ => None, - } -} - -fn get_integer_literal_from_cursor( - cursor: &clang::Cursor, - unit: &clang::TranslationUnit, -) -> Option { - use clang_sys::*; - let mut value = None; - cursor.visit(|c| { - match c.kind() { - CXCursor_IntegerLiteral | CXCursor_UnaryOperator => { - value = parse_int_literal_tokens(&c); - } - CXCursor_UnexposedExpr => { - value = get_integer_literal_from_cursor(&c, unit); - } - _ => (), - } - if value.is_some() { - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - value -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/lib.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2729 +0,0 @@ -//! Generate Rust bindings for C and C++ libraries. -//! -//! Provide a C/C++ header file, receive Rust FFI code to call into C/C++ -//! functions and use types defined in the header. -//! -//! See the [`Builder`](./struct.Builder.html) struct for usage. -//! -//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for -//! additional documentation. -#![deny(missing_docs)] -#![deny(unused_extern_crates)] -// To avoid rather annoying warnings when matching with CXCursor_xxx as a -// constant. 
-#![allow(non_upper_case_globals)] -// `quote!` nests quite deeply. -#![recursion_limit = "128"] - -#[macro_use] -extern crate bitflags; -#[macro_use] -extern crate lazy_static; -#[macro_use] -extern crate quote; - -#[cfg(feature = "logging")] -#[macro_use] -extern crate log; - -#[cfg(not(feature = "logging"))] -#[macro_use] -mod log_stubs; - -#[macro_use] -mod extra_assertions; - -// A macro to declare an internal module for which we *must* provide -// documentation for. If we are building with the "testing_only_docs" feature, -// then the module is declared public, and our `#![deny(missing_docs)]` pragma -// applies to it. This feature is used in CI, so we won't let anything slip by -// undocumented. Normal builds, however, will leave the module private, so that -// we don't expose internals to library consumers. -macro_rules! doc_mod { - ($m:ident, $doc_mod_name:ident) => { - #[cfg(feature = "testing_only_docs")] - pub mod $doc_mod_name { - //! Autogenerated documentation module. - pub use super::$m::*; - } - }; -} - -mod clang; -mod codegen; -mod deps; -mod features; -mod ir; -mod parse; -mod regex_set; -mod time; - -pub mod callbacks; - -doc_mod!(clang, clang_docs); -doc_mod!(features, features_docs); -doc_mod!(ir, ir_docs); -doc_mod!(parse, parse_docs); -doc_mod!(regex_set, regex_set_docs); - -pub use crate::codegen::{AliasVariation, EnumVariation, MacroTypeVariation}; -use crate::features::RustFeatures; -pub use crate::features::{ - RustTarget, LATEST_STABLE_RUST, RUST_TARGET_STRINGS, -}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::item::Item; -use crate::parse::{ClangItemParser, ParseError}; -use crate::regex_set::RegexSet; - -use std::borrow::Cow; -use std::fs::{File, OpenOptions}; -use std::io::{self, Write}; -use std::path::{Path, PathBuf}; -use std::process::{Command, Stdio}; -use std::{env, iter}; - -// Some convenient typedefs for a fast hash map and hash set. 
-type HashMap = ::rustc_hash::FxHashMap; -type HashSet = ::rustc_hash::FxHashSet; -pub(crate) use std::collections::hash_map::Entry; - -/// Default prefix for the anon fields. -pub const DEFAULT_ANON_FIELDS_PREFIX: &str = "__bindgen_anon_"; - -fn file_is_cpp(name_file: &str) -> bool { - name_file.ends_with(".hpp") || - name_file.ends_with(".hxx") || - name_file.ends_with(".hh") || - name_file.ends_with(".h++") -} - -fn args_are_cpp(clang_args: &[String]) -> bool { - for w in clang_args.windows(2) { - if w[0] == "-xc++" || w[1] == "-xc++" { - return true; - } - if w[0] == "-x" && w[1] == "c++" { - return true; - } - if w[0] == "-include" && file_is_cpp(&w[1]) { - return true; - } - } - false -} - -bitflags! { - /// A type used to indicate which kind of items we have to generate. - pub struct CodegenConfig: u32 { - /// Whether to generate functions. - const FUNCTIONS = 1 << 0; - /// Whether to generate types. - const TYPES = 1 << 1; - /// Whether to generate constants. - const VARS = 1 << 2; - /// Whether to generate methods. - const METHODS = 1 << 3; - /// Whether to generate constructors - const CONSTRUCTORS = 1 << 4; - /// Whether to generate destructors. - const DESTRUCTORS = 1 << 5; - } -} - -impl CodegenConfig { - /// Returns true if functions should be generated. - pub fn functions(self) -> bool { - self.contains(CodegenConfig::FUNCTIONS) - } - - /// Returns true if types should be generated. - pub fn types(self) -> bool { - self.contains(CodegenConfig::TYPES) - } - - /// Returns true if constants should be generated. - pub fn vars(self) -> bool { - self.contains(CodegenConfig::VARS) - } - - /// Returns true if methds should be generated. - pub fn methods(self) -> bool { - self.contains(CodegenConfig::METHODS) - } - - /// Returns true if constructors should be generated. - pub fn constructors(self) -> bool { - self.contains(CodegenConfig::CONSTRUCTORS) - } - - /// Returns true if destructors should be generated. 
- pub fn destructors(self) -> bool { - self.contains(CodegenConfig::DESTRUCTORS) - } -} - -impl Default for CodegenConfig { - fn default() -> Self { - CodegenConfig::all() - } -} - -/// Configure and generate Rust bindings for a C/C++ header. -/// -/// This is the main entry point to the library. -/// -/// ```ignore -/// use bindgen::builder; -/// -/// // Configure and generate bindings. -/// let bindings = builder().header("path/to/input/header") -/// .allowlist_type("SomeCoolClass") -/// .allowlist_function("do_some_cool_thing") -/// .generate()?; -/// -/// // Write the generated bindings to an output file. -/// bindings.write_to_file("path/to/output.rs")?; -/// ``` -/// -/// # Enums -/// -/// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on -/// the pattern passed to several methods: -/// -/// 1. [`constified_enum_module()`](#method.constified_enum_module) -/// 2. [`bitfield_enum()`](#method.bitfield_enum) -/// 3. [`newtype_enum()`](#method.newtype_enum) -/// 4. [`rustified_enum()`](#method.rustified_enum) -/// -/// For each C enum, bindgen tries to match the pattern in the following order: -/// -/// 1. Constified enum module -/// 2. Bitfield enum -/// 3. Newtype enum -/// 4. Rustified enum -/// -/// If none of the above patterns match, then bindgen will generate a set of Rust constants. -/// -/// # Clang arguments -/// -/// Extra arguments can be passed to with clang: -/// 1. [`clang_arg()`](#method.clang_arg): takes a single argument -/// 2. [`clang_args()`](#method.clang_args): takes an iterator of arguments -/// 3. `BINDGEN_EXTRA_CLANG_ARGS` environment variable: whitespace separate -/// environment variable of arguments -/// -/// Clang arguments specific to your crate should be added via the -/// `clang_arg()`/`clang_args()` methods. -/// -/// End-users of the crate may need to set the `BINDGEN_EXTRA_CLANG_ARGS` environment variable to -/// add additional arguments. 
For example, to build against a different sysroot a user could set -/// `BINDGEN_EXTRA_CLANG_ARGS` to `--sysroot=/path/to/sysroot`. -#[derive(Debug, Default)] -pub struct Builder { - options: BindgenOptions, - input_headers: Vec, - // Tuples of unsaved file contents of the form (name, contents). - input_header_contents: Vec<(String, String)>, -} - -/// Construct a new [`Builder`](./struct.Builder.html). -pub fn builder() -> Builder { - Default::default() -} - -impl Builder { - /// Generates the command line flags use for creating `Builder`. - pub fn command_line_flags(&self) -> Vec { - let mut output_vector: Vec = Vec::new(); - - if let Some(header) = self.input_headers.last().cloned() { - // Positional argument 'header' - output_vector.push(header); - } - - output_vector.push("--rust-target".into()); - output_vector.push(self.options.rust_target.into()); - - // FIXME(emilio): This is a bit hacky, maybe we should stop re-using the - // RustFeatures to store the "disable_untagged_union" call, and make it - // a different flag that we check elsewhere / in generate(). 
- if !self.options.rust_features.untagged_union && - RustFeatures::from(self.options.rust_target).untagged_union - { - output_vector.push("--disable-untagged-union".into()); - } - - if self.options.default_enum_style != Default::default() { - output_vector.push("--default-enum-style".into()); - output_vector.push( - match self.options.default_enum_style { - codegen::EnumVariation::Rust { - non_exhaustive: false, - } => "rust", - codegen::EnumVariation::Rust { - non_exhaustive: true, - } => "rust_non_exhaustive", - codegen::EnumVariation::NewType { is_bitfield: true } => { - "bitfield" - } - codegen::EnumVariation::NewType { is_bitfield: false } => { - "newtype" - } - codegen::EnumVariation::Consts => "consts", - codegen::EnumVariation::ModuleConsts => "moduleconsts", - } - .into(), - ) - } - - if self.options.default_macro_constant_type != Default::default() { - output_vector.push("--default-macro-constant-type".into()); - output_vector - .push(self.options.default_macro_constant_type.as_str().into()); - } - - if self.options.default_alias_style != Default::default() { - output_vector.push("--default-alias-style".into()); - output_vector - .push(self.options.default_alias_style.as_str().into()); - } - - let regex_sets = &[ - (&self.options.bitfield_enums, "--bitfield-enum"), - (&self.options.newtype_enums, "--newtype-enum"), - (&self.options.rustified_enums, "--rustified-enum"), - ( - &self.options.rustified_non_exhaustive_enums, - "--rustified-enum-non-exhaustive", - ), - ( - &self.options.constified_enum_modules, - "--constified-enum-module", - ), - (&self.options.constified_enums, "--constified-enum"), - (&self.options.type_alias, "--type-alias"), - (&self.options.new_type_alias, "--new-type-alias"), - (&self.options.new_type_alias_deref, "--new-type-alias-deref"), - (&self.options.blocklisted_types, "--blocklist-type"), - (&self.options.blocklisted_functions, "--blocklist-function"), - (&self.options.blocklisted_items, "--blocklist-item"), - 
(&self.options.blocklisted_files, "--blocklist-file"), - (&self.options.opaque_types, "--opaque-type"), - (&self.options.allowlisted_functions, "--allowlist-function"), - (&self.options.allowlisted_types, "--allowlist-type"), - (&self.options.allowlisted_vars, "--allowlist-var"), - (&self.options.no_partialeq_types, "--no-partialeq"), - (&self.options.no_copy_types, "--no-copy"), - (&self.options.no_debug_types, "--no-debug"), - (&self.options.no_default_types, "--no-default"), - (&self.options.no_hash_types, "--no-hash"), - (&self.options.must_use_types, "--must-use-type"), - ]; - - for (set, flag) in regex_sets { - for item in set.get_items() { - output_vector.push((*flag).to_owned()); - output_vector.push(item.to_owned()); - } - } - - if !self.options.layout_tests { - output_vector.push("--no-layout-tests".into()); - } - - if self.options.impl_debug { - output_vector.push("--impl-debug".into()); - } - - if self.options.impl_partialeq { - output_vector.push("--impl-partialeq".into()); - } - - if !self.options.derive_copy { - output_vector.push("--no-derive-copy".into()); - } - - if !self.options.derive_debug { - output_vector.push("--no-derive-debug".into()); - } - - if !self.options.derive_default { - output_vector.push("--no-derive-default".into()); - } else { - output_vector.push("--with-derive-default".into()); - } - - if self.options.derive_hash { - output_vector.push("--with-derive-hash".into()); - } - - if self.options.derive_partialord { - output_vector.push("--with-derive-partialord".into()); - } - - if self.options.derive_ord { - output_vector.push("--with-derive-ord".into()); - } - - if self.options.derive_partialeq { - output_vector.push("--with-derive-partialeq".into()); - } - - if self.options.derive_eq { - output_vector.push("--with-derive-eq".into()); - } - - if self.options.time_phases { - output_vector.push("--time-phases".into()); - } - - if !self.options.generate_comments { - output_vector.push("--no-doc-comments".into()); - } - - if 
!self.options.allowlist_recursively { - output_vector.push("--no-recursive-allowlist".into()); - } - - if self.options.objc_extern_crate { - output_vector.push("--objc-extern-crate".into()); - } - - if self.options.generate_block { - output_vector.push("--generate-block".into()); - } - - if self.options.block_extern_crate { - output_vector.push("--block-extern-crate".into()); - } - - if self.options.builtins { - output_vector.push("--builtins".into()); - } - - if let Some(ref prefix) = self.options.ctypes_prefix { - output_vector.push("--ctypes-prefix".into()); - output_vector.push(prefix.clone()); - } - - if self.options.anon_fields_prefix != DEFAULT_ANON_FIELDS_PREFIX { - output_vector.push("--anon-fields-prefix".into()); - output_vector.push(self.options.anon_fields_prefix.clone()); - } - - if self.options.emit_ast { - output_vector.push("--emit-clang-ast".into()); - } - - if self.options.emit_ir { - output_vector.push("--emit-ir".into()); - } - if let Some(ref graph) = self.options.emit_ir_graphviz { - output_vector.push("--emit-ir-graphviz".into()); - output_vector.push(graph.clone()) - } - if self.options.enable_cxx_namespaces { - output_vector.push("--enable-cxx-namespaces".into()); - } - if self.options.enable_function_attribute_detection { - output_vector.push("--enable-function-attribute-detection".into()); - } - if self.options.disable_name_namespacing { - output_vector.push("--disable-name-namespacing".into()); - } - if self.options.disable_nested_struct_naming { - output_vector.push("--disable-nested-struct-naming".into()); - } - - if self.options.disable_header_comment { - output_vector.push("--disable-header-comment".into()); - } - - if !self.options.codegen_config.functions() { - output_vector.push("--ignore-functions".into()); - } - - output_vector.push("--generate".into()); - - //Temporary placeholder for below 4 options - let mut options: Vec = Vec::new(); - if self.options.codegen_config.functions() { - options.push("functions".into()); - } - if 
self.options.codegen_config.types() { - options.push("types".into()); - } - if self.options.codegen_config.vars() { - options.push("vars".into()); - } - if self.options.codegen_config.methods() { - options.push("methods".into()); - } - if self.options.codegen_config.constructors() { - options.push("constructors".into()); - } - if self.options.codegen_config.destructors() { - options.push("destructors".into()); - } - - output_vector.push(options.join(",")); - - if !self.options.codegen_config.methods() { - output_vector.push("--ignore-methods".into()); - } - - if !self.options.convert_floats { - output_vector.push("--no-convert-floats".into()); - } - - if !self.options.prepend_enum_name { - output_vector.push("--no-prepend-enum-name".into()); - } - - if self.options.fit_macro_constants { - output_vector.push("--fit-macro-constant-types".into()); - } - - if self.options.array_pointers_in_arguments { - output_vector.push("--use-array-pointers-in-arguments".into()); - } - - if let Some(ref wasm_import_module_name) = - self.options.wasm_import_module_name - { - output_vector.push("--wasm-import-module-name".into()); - output_vector.push(wasm_import_module_name.clone()); - } - - for line in &self.options.raw_lines { - output_vector.push("--raw-line".into()); - output_vector.push(line.clone()); - } - - for (module, lines) in &self.options.module_lines { - for line in lines.iter() { - output_vector.push("--module-raw-line".into()); - output_vector.push(module.clone()); - output_vector.push(line.clone()); - } - } - - if self.options.use_core { - output_vector.push("--use-core".into()); - } - - if self.options.conservative_inline_namespaces { - output_vector.push("--conservative-inline-namespaces".into()); - } - - if self.options.generate_inline_functions { - output_vector.push("--generate-inline-functions".into()); - } - - if !self.options.record_matches { - output_vector.push("--no-record-matches".into()); - } - - if self.options.size_t_is_usize { - 
output_vector.push("--size_t-is-usize".into()); - } - - if !self.options.rustfmt_bindings { - output_vector.push("--no-rustfmt-bindings".into()); - } - - if let Some(path) = self - .options - .rustfmt_configuration_file - .as_ref() - .and_then(|f| f.to_str()) - { - output_vector.push("--rustfmt-configuration-file".into()); - output_vector.push(path.into()); - } - - if let Some(ref name) = self.options.dynamic_library_name { - output_vector.push("--dynamic-loading".into()); - output_vector.push(name.clone()); - } - - if self.options.dynamic_link_require_all { - output_vector.push("--dynamic-link-require-all".into()); - } - - if self.options.respect_cxx_access_specs { - output_vector.push("--respect-cxx-access-specs".into()); - } - - if self.options.translate_enum_integer_types { - output_vector.push("--translate-enum-integer-types".into()); - } - - if self.options.c_naming { - output_vector.push("--c-naming".into()); - } - - if self.options.force_explicit_padding { - output_vector.push("--explicit-padding".into()); - } - - // Add clang arguments - - output_vector.push("--".into()); - - if !self.options.clang_args.is_empty() { - output_vector.extend(self.options.clang_args.iter().cloned()); - } - - if self.input_headers.len() > 1 { - // To pass more than one header, we need to pass all but the last - // header via the `-include` clang arg - for header in &self.input_headers[..self.input_headers.len() - 1] { - output_vector.push("-include".to_string()); - output_vector.push(header.clone()); - } - } - - output_vector - } - - /// Add an input C/C++ header to generate bindings for. 
- /// - /// This can be used to generate bindings to a single header: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .header("input.h") - /// .generate() - /// .unwrap(); - /// ``` - /// - /// Or you can invoke it multiple times to generate bindings to multiple - /// headers: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .header("first.h") - /// .header("second.h") - /// .header("third.h") - /// .generate() - /// .unwrap(); - /// ``` - pub fn header>(mut self, header: T) -> Builder { - self.input_headers.push(header.into()); - self - } - - /// Add a depfile output which will be written alongside the generated bindings. - pub fn depfile, D: Into>( - mut self, - output_module: H, - depfile: D, - ) -> Builder { - self.options.depfile = Some(deps::DepfileSpec { - output_module: output_module.into(), - depfile_path: depfile.into(), - }); - self - } - - /// Add `contents` as an input C/C++ header named `name`. - /// - /// The file `name` will be added to the clang arguments. - pub fn header_contents(mut self, name: &str, contents: &str) -> Builder { - // Apparently clang relies on having virtual FS correspondent to - // the real one, so we need absolute paths here - let absolute_path = env::current_dir() - .expect("Cannot retrieve current directory") - .join(name) - .to_str() - .expect("Cannot convert current directory name to string") - .to_owned(); - self.input_header_contents - .push((absolute_path, contents.into())); - self - } - - /// Specify the rust target - /// - /// The default is the latest stable Rust version - pub fn rust_target(mut self, rust_target: RustTarget) -> Self { - self.options.set_rust_target(rust_target); - self - } - - /// Disable support for native Rust unions, if supported. - pub fn disable_untagged_union(mut self) -> Self { - self.options.rust_features.untagged_union = false; - self - } - - /// Disable insertion of bindgen's version identifier into generated - /// bindings. 
- pub fn disable_header_comment(mut self) -> Self { - self.options.disable_header_comment = true; - self - } - - /// Set the output graphviz file. - pub fn emit_ir_graphviz>(mut self, path: T) -> Builder { - let path = path.into(); - self.options.emit_ir_graphviz = Some(path); - self - } - - /// Whether the generated bindings should contain documentation comments - /// (docstrings) or not. This is set to true by default. - /// - /// Note that clang by default excludes comments from system headers, pass - /// `-fretain-comments-from-system-headers` as - /// [`clang_arg`][Builder::clang_arg] to include them. It can also be told - /// to process all comments (not just documentation ones) using the - /// `-fparse-all-comments` flag. See [slides on clang comment parsing]( - /// https://llvm.org/devmtg/2012-11/Gribenko_CommentParsing.pdf) for - /// background and examples. - pub fn generate_comments(mut self, doit: bool) -> Self { - self.options.generate_comments = doit; - self - } - - /// Whether to allowlist recursively or not. Defaults to true. - /// - /// Given that we have explicitly allowlisted the "initiate_dance_party" - /// function in this C header: - /// - /// ```c - /// typedef struct MoonBoots { - /// int bouncy_level; - /// } MoonBoots; - /// - /// void initiate_dance_party(MoonBoots* boots); - /// ``` - /// - /// We would normally generate bindings to both the `initiate_dance_party` - /// function and the `MoonBoots` struct that it transitively references. By - /// configuring with `allowlist_recursively(false)`, `bindgen` will not emit - /// bindings for anything except the explicitly allowlisted items, and there - /// would be no emitted struct definition for `MoonBoots`. However, the - /// `initiate_dance_party` function would still reference `MoonBoots`! 
- /// - /// **Disabling this feature will almost certainly cause `bindgen` to emit - /// bindings that will not compile!** If you disable this feature, then it - /// is *your* responsibility to provide definitions for every type that is - /// referenced from an explicitly allowlisted item. One way to provide the - /// definitions is by using the [`Builder::raw_line`](#method.raw_line) - /// method, another would be to define them in Rust and then `include!(...)` - /// the bindings immediately afterwards. - pub fn allowlist_recursively(mut self, doit: bool) -> Self { - self.options.allowlist_recursively = doit; - self - } - - /// Deprecated alias for allowlist_recursively. - #[deprecated(note = "Use allowlist_recursively instead")] - pub fn whitelist_recursively(self, doit: bool) -> Self { - self.allowlist_recursively(doit) - } - - /// Generate `#[macro_use] extern crate objc;` instead of `use objc;` - /// in the prologue of the files generated from objective-c files - pub fn objc_extern_crate(mut self, doit: bool) -> Self { - self.options.objc_extern_crate = doit; - self - } - - /// Generate proper block signatures instead of void pointers. - pub fn generate_block(mut self, doit: bool) -> Self { - self.options.generate_block = doit; - self - } - - /// Generate `#[macro_use] extern crate block;` instead of `use block;` - /// in the prologue of the files generated from apple block files - pub fn block_extern_crate(mut self, doit: bool) -> Self { - self.options.block_extern_crate = doit; - self - } - - /// Whether to use the clang-provided name mangling. This is true by default - /// and probably needed for C++ features. - /// - /// However, some old libclang versions seem to return incorrect results in - /// some cases for non-mangled functions, see [1], so we allow disabling it. 
- /// - /// [1]: https://github.com/rust-lang/rust-bindgen/issues/528 - pub fn trust_clang_mangling(mut self, doit: bool) -> Self { - self.options.enable_mangling = doit; - self - } - - /// Hide the given type from the generated bindings. Regular expressions are - /// supported. - #[deprecated(note = "Use blocklist_type instead")] - pub fn hide_type>(self, arg: T) -> Builder { - self.blocklist_type(arg) - } - - /// Hide the given type from the generated bindings. Regular expressions are - /// supported. - #[deprecated(note = "Use blocklist_type instead")] - pub fn blacklist_type>(self, arg: T) -> Builder { - self.blocklist_type(arg) - } - - /// Hide the given type from the generated bindings. Regular expressions are - /// supported. - /// - /// To blocklist types prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn blocklist_type>(mut self, arg: T) -> Builder { - self.options.blocklisted_types.insert(arg); - self - } - - /// Hide the given function from the generated bindings. Regular expressions - /// are supported. - #[deprecated(note = "Use blocklist_function instead")] - pub fn blacklist_function>(self, arg: T) -> Builder { - self.blocklist_function(arg) - } - - /// Hide the given function from the generated bindings. Regular expressions - /// are supported. - /// - /// To blocklist functions prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn blocklist_function>(mut self, arg: T) -> Builder { - self.options.blocklisted_functions.insert(arg); - self - } - - /// Hide the given item from the generated bindings, regardless of - /// whether it's a type, function, module, etc. Regular - /// expressions are supported. 
- #[deprecated(note = "Use blocklist_item instead")] - pub fn blacklist_item>(mut self, arg: T) -> Builder { - self.options.blocklisted_items.insert(arg); - self - } - - /// Hide the given item from the generated bindings, regardless of - /// whether it's a type, function, module, etc. Regular - /// expressions are supported. - /// - /// To blocklist items prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn blocklist_item>(mut self, arg: T) -> Builder { - self.options.blocklisted_items.insert(arg); - self - } - - /// Hide any contents of the given file from the generated bindings, - /// regardless of whether it's a type, function, module etc. - pub fn blocklist_file>(mut self, arg: T) -> Builder { - self.options.blocklisted_files.insert(arg); - self - } - - /// Treat the given type as opaque in the generated bindings. Regular - /// expressions are supported. - /// - /// To change types prefixed with "mylib" into opaque, use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn opaque_type>(mut self, arg: T) -> Builder { - self.options.opaque_types.insert(arg); - self - } - - /// Allowlist the given type so that it (and all types that it transitively - /// refers to) appears in the generated bindings. Regular expressions are - /// supported. - #[deprecated(note = "use allowlist_type instead")] - pub fn whitelisted_type>(self, arg: T) -> Builder { - self.allowlist_type(arg) - } - - /// Allowlist the given type so that it (and all types that it transitively - /// refers to) appears in the generated bindings. Regular expressions are - /// supported. - #[deprecated(note = "use allowlist_type instead")] - pub fn whitelist_type>(self, arg: T) -> Builder { - self.allowlist_type(arg) - } - - /// Allowlist the given type so that it (and all types that it transitively - /// refers to) appears in the generated bindings. 
Regular expressions are - /// supported. - /// - /// To allowlist types prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn allowlist_type>(mut self, arg: T) -> Builder { - self.options.allowlisted_types.insert(arg); - self - } - - /// Allowlist the given function so that it (and all types that it - /// transitively refers to) appears in the generated bindings. Regular - /// expressions are supported. - /// - /// To allowlist functions prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn allowlist_function>(mut self, arg: T) -> Builder { - self.options.allowlisted_functions.insert(arg); - self - } - - /// Allowlist the given function. - /// - /// Deprecated: use allowlist_function instead. - #[deprecated(note = "use allowlist_function instead")] - pub fn whitelist_function>(self, arg: T) -> Builder { - self.allowlist_function(arg) - } - - /// Allowlist the given function. - /// - /// Deprecated: use allowlist_function instead. - #[deprecated(note = "use allowlist_function instead")] - pub fn whitelisted_function>(self, arg: T) -> Builder { - self.allowlist_function(arg) - } - - /// Allowlist the given variable so that it (and all types that it - /// transitively refers to) appears in the generated bindings. Regular - /// expressions are supported. - /// - /// To allowlist variables prefixed with "mylib" use `"mylib_.*"`. - /// For more complicated expressions check - /// [regex](https://docs.rs/regex/*/regex/) docs - pub fn allowlist_var>(mut self, arg: T) -> Builder { - self.options.allowlisted_vars.insert(arg); - self - } - - /// Deprecated: use allowlist_var instead. - #[deprecated(note = "use allowlist_var instead")] - pub fn whitelist_var>(self, arg: T) -> Builder { - self.allowlist_var(arg) - } - - /// Allowlist the given variable. 
- /// - /// Deprecated: use allowlist_var instead. - #[deprecated(note = "use allowlist_var instead")] - pub fn whitelisted_var>(self, arg: T) -> Builder { - self.allowlist_var(arg) - } - - /// Set the default style of code to generate for enums - pub fn default_enum_style( - mut self, - arg: codegen::EnumVariation, - ) -> Builder { - self.options.default_enum_style = arg; - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as being - /// bitfield-like. Regular expressions are supported. - /// - /// This makes bindgen generate a type that isn't a rust `enum`. Regular - /// expressions are supported. - /// - /// This is similar to the newtype enum style, but with the bitwise - /// operators implemented. - pub fn bitfield_enum>(mut self, arg: T) -> Builder { - self.options.bitfield_enums.insert(arg); - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as a newtype. - /// Regular expressions are supported. - /// - /// This makes bindgen generate a type that isn't a Rust `enum`. Regular - /// expressions are supported. - pub fn newtype_enum>(mut self, arg: T) -> Builder { - self.options.newtype_enums.insert(arg); - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as a Rust - /// enum. - /// - /// This makes bindgen generate enums instead of constants. Regular - /// expressions are supported. - /// - /// **Use this with caution**, creating this in unsafe code - /// (including FFI) with an invalid value will invoke undefined behaviour. - /// You may want to use the newtype enum style instead. - pub fn rustified_enum>(mut self, arg: T) -> Builder { - self.options.rustified_enums.insert(arg); - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as a Rust - /// enum with the `#[non_exhaustive]` attribute. - /// - /// This makes bindgen generate enums instead of constants. Regular - /// expressions are supported. 
- /// - /// **Use this with caution**, creating this in unsafe code - /// (including FFI) with an invalid value will invoke undefined behaviour. - /// You may want to use the newtype enum style instead. - pub fn rustified_non_exhaustive_enum>( - mut self, - arg: T, - ) -> Builder { - self.options.rustified_non_exhaustive_enums.insert(arg); - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as a set of - /// constants that are not to be put into a module. - pub fn constified_enum>(mut self, arg: T) -> Builder { - self.options.constified_enums.insert(arg); - self - } - - /// Mark the given enum (or set of enums, if using a pattern) as a set of - /// constants that should be put into a module. - /// - /// This makes bindgen generate modules containing constants instead of - /// just constants. Regular expressions are supported. - pub fn constified_enum_module>(mut self, arg: T) -> Builder { - self.options.constified_enum_modules.insert(arg); - self - } - - /// Set the default type for macro constants - pub fn default_macro_constant_type( - mut self, - arg: codegen::MacroTypeVariation, - ) -> Builder { - self.options.default_macro_constant_type = arg; - self - } - - /// Set the default style of code to generate for typedefs - pub fn default_alias_style( - mut self, - arg: codegen::AliasVariation, - ) -> Builder { - self.options.default_alias_style = arg; - self - } - - /// Mark the given typedef alias (or set of aliases, if using a pattern) to - /// use regular Rust type aliasing. - /// - /// This is the default behavior and should be used if `default_alias_style` - /// was set to NewType or NewTypeDeref and you want to override it for a - /// set of typedefs. 
- pub fn type_alias>(mut self, arg: T) -> Builder { - self.options.type_alias.insert(arg); - self - } - - /// Mark the given typedef alias (or set of aliases, if using a pattern) to - /// be generated as a new type by having the aliased type be wrapped in a - /// #[repr(transparent)] struct. - /// - /// Used to enforce stricter type checking. - pub fn new_type_alias>(mut self, arg: T) -> Builder { - self.options.new_type_alias.insert(arg); - self - } - - /// Mark the given typedef alias (or set of aliases, if using a pattern) to - /// be generated as a new type by having the aliased type be wrapped in a - /// #[repr(transparent)] struct and also have an automatically generated - /// impl's of `Deref` and `DerefMut` to their aliased type. - pub fn new_type_alias_deref>(mut self, arg: T) -> Builder { - self.options.new_type_alias_deref.insert(arg); - self - } - - /// Add a string to prepend to the generated bindings. The string is passed - /// through without any modification. - pub fn raw_line>(mut self, arg: T) -> Self { - self.options.raw_lines.push(arg.into()); - self - } - - /// Add a given line to the beginning of module `mod`. - pub fn module_raw_line(mut self, mod_: T, line: U) -> Self - where - T: Into, - U: Into, - { - self.options - .module_lines - .entry(mod_.into()) - .or_insert_with(Vec::new) - .push(line.into()); - self - } - - /// Add a given set of lines to the beginning of module `mod`. - pub fn module_raw_lines(mut self, mod_: T, lines: I) -> Self - where - T: Into, - I: IntoIterator, - I::Item: Into, - { - self.options - .module_lines - .entry(mod_.into()) - .or_insert_with(Vec::new) - .extend(lines.into_iter().map(Into::into)); - self - } - - /// Add an argument to be passed straight through to clang. - pub fn clang_arg>(mut self, arg: T) -> Builder { - self.options.clang_args.push(arg.into()); - self - } - - /// Add arguments to be passed straight through to clang. 
- pub fn clang_args(mut self, iter: I) -> Builder - where - I: IntoIterator, - I::Item: AsRef, - { - for arg in iter { - self = self.clang_arg(arg.as_ref()) - } - self - } - - /// Emit bindings for builtin definitions (for example `__builtin_va_list`) - /// in the generated Rust. - pub fn emit_builtins(mut self) -> Builder { - self.options.builtins = true; - self - } - - /// Avoid converting floats to `f32`/`f64` by default. - pub fn no_convert_floats(mut self) -> Self { - self.options.convert_floats = false; - self - } - - /// Set whether layout tests should be generated. - pub fn layout_tests(mut self, doit: bool) -> Self { - self.options.layout_tests = doit; - self - } - - /// Set whether `Debug` should be implemented, if it can not be derived automatically. - pub fn impl_debug(mut self, doit: bool) -> Self { - self.options.impl_debug = doit; - self - } - - /// Set whether `PartialEq` should be implemented, if it can not be derived automatically. - pub fn impl_partialeq(mut self, doit: bool) -> Self { - self.options.impl_partialeq = doit; - self - } - - /// Set whether `Copy` should be derived by default. - pub fn derive_copy(mut self, doit: bool) -> Self { - self.options.derive_copy = doit; - self - } - - /// Set whether `Debug` should be derived by default. - pub fn derive_debug(mut self, doit: bool) -> Self { - self.options.derive_debug = doit; - self - } - - /// Set whether `Default` should be derived by default. - pub fn derive_default(mut self, doit: bool) -> Self { - self.options.derive_default = doit; - self - } - - /// Set whether `Hash` should be derived by default. - pub fn derive_hash(mut self, doit: bool) -> Self { - self.options.derive_hash = doit; - self - } - - /// Set whether `PartialOrd` should be derived by default. - /// If we don't compute partialord, we also cannot compute - /// ord. Set the derive_ord to `false` when doit is `false`. 
- pub fn derive_partialord(mut self, doit: bool) -> Self { - self.options.derive_partialord = doit; - if !doit { - self.options.derive_ord = false; - } - self - } - - /// Set whether `Ord` should be derived by default. - /// We can't compute `Ord` without computing `PartialOrd`, - /// so we set the same option to derive_partialord. - pub fn derive_ord(mut self, doit: bool) -> Self { - self.options.derive_ord = doit; - self.options.derive_partialord = doit; - self - } - - /// Set whether `PartialEq` should be derived by default. - /// - /// If we don't derive `PartialEq`, we also cannot derive `Eq`, so deriving - /// `Eq` is also disabled when `doit` is `false`. - pub fn derive_partialeq(mut self, doit: bool) -> Self { - self.options.derive_partialeq = doit; - if !doit { - self.options.derive_eq = false; - } - self - } - - /// Set whether `Eq` should be derived by default. - /// - /// We can't derive `Eq` without also deriving `PartialEq`, so we also - /// enable deriving `PartialEq` when `doit` is `true`. - pub fn derive_eq(mut self, doit: bool) -> Self { - self.options.derive_eq = doit; - if doit { - self.options.derive_partialeq = doit; - } - self - } - - /// Set whether or not to time bindgen phases, and print information to - /// stderr. - pub fn time_phases(mut self, doit: bool) -> Self { - self.options.time_phases = doit; - self - } - - /// Emit Clang AST. - pub fn emit_clang_ast(mut self) -> Builder { - self.options.emit_ast = true; - self - } - - /// Emit IR. - pub fn emit_ir(mut self) -> Builder { - self.options.emit_ir = true; - self - } - - /// Enable C++ namespaces. - pub fn enable_cxx_namespaces(mut self) -> Builder { - self.options.enable_cxx_namespaces = true; - self - } - - /// Enable detecting must_use attributes on C functions. - /// - /// This is quite slow in some cases (see #1465), so it's disabled by - /// default. 
- /// - /// Note that for this to do something meaningful for now at least, the rust - /// target version has to have support for `#[must_use]`. - pub fn enable_function_attribute_detection(mut self) -> Self { - self.options.enable_function_attribute_detection = true; - self - } - - /// Disable name auto-namespacing. - /// - /// By default, bindgen mangles names like `foo::bar::Baz` to look like - /// `foo_bar_Baz` instead of just `Baz`. - /// - /// This method disables that behavior. - /// - /// Note that this intentionally does not change the names used for - /// allowlisting and blocklisting, which should still be mangled with the - /// namespaces. - /// - /// Note, also, that this option may cause bindgen to generate duplicate - /// names. - pub fn disable_name_namespacing(mut self) -> Builder { - self.options.disable_name_namespacing = true; - self - } - - /// Disable nested struct naming. - /// - /// The following structs have different names for C and C++. In case of C - /// they are visible as `foo` and `bar`. In case of C++ they are visible as - /// `foo` and `foo::bar`. - /// - /// ```c - /// struct foo { - /// struct bar { - /// } b; - /// }; - /// ``` - /// - /// Bindgen wants to avoid duplicate names by default so it follows C++ naming - /// and it generates `foo`/`foo_bar` instead of just `foo`/`bar`. - /// - /// This method disables this behavior and it is indented to be used only - /// for headers that were written for C. - pub fn disable_nested_struct_naming(mut self) -> Builder { - self.options.disable_nested_struct_naming = true; - self - } - - /// Treat inline namespaces conservatively. - /// - /// This is tricky, because in C++ is technically legal to override an item - /// defined in an inline namespace: - /// - /// ```cpp - /// inline namespace foo { - /// using Bar = int; - /// } - /// using Bar = long; - /// ``` - /// - /// Even though referencing `Bar` is a compiler error. 
- /// - /// We want to support this (arguably esoteric) use case, but we don't want - /// to make the rest of bindgen users pay an usability penalty for that. - /// - /// To support this, we need to keep all the inline namespaces around, but - /// then bindgen usage is a bit more difficult, because you cannot - /// reference, e.g., `std::string` (you'd need to use the proper inline - /// namespace). - /// - /// We could complicate a lot of the logic to detect name collisions, and if - /// not detected generate a `pub use inline_ns::*` or something like that. - /// - /// That's probably something we can do if we see this option is needed in a - /// lot of cases, to improve it's usability, but my guess is that this is - /// not going to be too useful. - pub fn conservative_inline_namespaces(mut self) -> Builder { - self.options.conservative_inline_namespaces = true; - self - } - - /// Whether inline functions should be generated or not. - /// - /// Note that they will usually not work. However you can use - /// `-fkeep-inline-functions` or `-fno-inline-functions` if you are - /// responsible of compiling the library to make them callable. - pub fn generate_inline_functions(mut self, doit: bool) -> Self { - self.options.generate_inline_functions = doit; - self - } - - /// Ignore functions. - pub fn ignore_functions(mut self) -> Builder { - self.options.codegen_config.remove(CodegenConfig::FUNCTIONS); - self - } - - /// Ignore methods. - pub fn ignore_methods(mut self) -> Builder { - self.options.codegen_config.remove(CodegenConfig::METHODS); - self - } - - /// Avoid generating any unstable Rust, such as Rust unions, in the generated bindings. - #[deprecated(note = "please use `rust_target` instead")] - pub fn unstable_rust(self, doit: bool) -> Self { - let rust_target = if doit { - RustTarget::Nightly - } else { - LATEST_STABLE_RUST - }; - self.rust_target(rust_target) - } - - /// Use core instead of libstd in the generated bindings. 
- pub fn use_core(mut self) -> Builder { - self.options.use_core = true; - self - } - - /// Use the given prefix for the raw types instead of `::std::os::raw`. - pub fn ctypes_prefix>(mut self, prefix: T) -> Builder { - self.options.ctypes_prefix = Some(prefix.into()); - self - } - - /// Use the given prefix for the anon fields. - pub fn anon_fields_prefix>(mut self, prefix: T) -> Builder { - self.options.anon_fields_prefix = prefix.into(); - self - } - - /// Allows configuring types in different situations, see the - /// [`ParseCallbacks`](./callbacks/trait.ParseCallbacks.html) documentation. - pub fn parse_callbacks( - mut self, - cb: Box, - ) -> Self { - self.options.parse_callbacks = Some(cb); - self - } - - /// Choose what to generate using a - /// [`CodegenConfig`](./struct.CodegenConfig.html). - pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self { - self.options.codegen_config = config; - self - } - - /// Whether to detect include paths using clang_sys. - pub fn detect_include_paths(mut self, doit: bool) -> Self { - self.options.detect_include_paths = doit; - self - } - - /// Whether to try to fit macro constants to types smaller than u32/i32 - pub fn fit_macro_constants(mut self, doit: bool) -> Self { - self.options.fit_macro_constants = doit; - self - } - - /// Prepend the enum name to constant or newtype variants. - pub fn prepend_enum_name(mut self, doit: bool) -> Self { - self.options.prepend_enum_name = doit; - self - } - - /// Set whether `size_t` should be translated to `usize` automatically. - pub fn size_t_is_usize(mut self, is: bool) -> Self { - self.options.size_t_is_usize = is; - self - } - - /// Set whether rustfmt should format the generated bindings. - pub fn rustfmt_bindings(mut self, doit: bool) -> Self { - self.options.rustfmt_bindings = doit; - self - } - - /// Set whether we should record matched items in our regex sets. 
- pub fn record_matches(mut self, doit: bool) -> Self { - self.options.record_matches = doit; - self - } - - /// Set the absolute path to the rustfmt configuration file, if None, the standard rustfmt - /// options are used. - pub fn rustfmt_configuration_file(mut self, path: Option) -> Self { - self = self.rustfmt_bindings(true); - self.options.rustfmt_configuration_file = path; - self - } - - /// Sets an explicit path to rustfmt, to be used when rustfmt is enabled. - pub fn with_rustfmt>(mut self, path: P) -> Self { - self.options.rustfmt_path = Some(path.into()); - self - } - - /// If true, always emit explicit padding fields. - /// - /// If a struct needs to be serialized in its native format (padding bytes - /// and all), for example writing it to a file or sending it on the network, - /// then this should be enabled, as anything reading the padding bytes of - /// a struct may lead to Undefined Behavior. - pub fn explicit_padding(mut self, doit: bool) -> Self { - self.options.force_explicit_padding = doit; - self - } - - /// Generate the Rust bindings using the options built up thus far. - pub fn generate(mut self) -> Result { - // Add any extra arguments from the environment to the clang command line. - if let Some(extra_clang_args) = - get_target_dependent_env_var("BINDGEN_EXTRA_CLANG_ARGS") - { - // Try to parse it with shell quoting. If we fail, make it one single big argument. - if let Some(strings) = shlex::split(&extra_clang_args) { - self.options.clang_args.extend(strings); - } else { - self.options.clang_args.push(extra_clang_args); - }; - } - - // Transform input headers to arguments on the clang command line. 
- self.options.input_header = self.input_headers.pop(); - self.options.extra_input_headers = self.input_headers; - self.options.clang_args.extend( - self.options.extra_input_headers.iter().flat_map(|header| { - iter::once("-include".into()) - .chain(iter::once(header.to_string())) - }), - ); - - self.options.input_unsaved_files.extend( - self.input_header_contents - .drain(..) - .map(|(name, contents)| { - clang::UnsavedFile::new(&name, &contents) - }), - ); - - Bindings::generate(self.options) - } - - /// Preprocess and dump the input header files to disk. - /// - /// This is useful when debugging bindgen, using C-Reduce, or when filing - /// issues. The resulting file will be named something like `__bindgen.i` or - /// `__bindgen.ii` - pub fn dump_preprocessed_input(&self) -> io::Result<()> { - let clang = - clang_sys::support::Clang::find(None, &[]).ok_or_else(|| { - io::Error::new( - io::ErrorKind::Other, - "Cannot find clang executable", - ) - })?; - - // The contents of a wrapper file that includes all the input header - // files. - let mut wrapper_contents = String::new(); - - // Whether we are working with C or C++ inputs. - let mut is_cpp = args_are_cpp(&self.options.clang_args); - - // For each input header, add `#include "$header"`. - for header in &self.input_headers { - is_cpp |= file_is_cpp(header); - - wrapper_contents.push_str("#include \""); - wrapper_contents.push_str(header); - wrapper_contents.push_str("\"\n"); - } - - // For each input header content, add a prefix line of `#line 0 "$name"` - // followed by the contents. 
- for &(ref name, ref contents) in &self.input_header_contents { - is_cpp |= file_is_cpp(name); - - wrapper_contents.push_str("#line 0 \""); - wrapper_contents.push_str(name); - wrapper_contents.push_str("\"\n"); - wrapper_contents.push_str(contents); - } - - let wrapper_path = PathBuf::from(if is_cpp { - "__bindgen.cpp" - } else { - "__bindgen.c" - }); - - { - let mut wrapper_file = File::create(&wrapper_path)?; - wrapper_file.write_all(wrapper_contents.as_bytes())?; - } - - let mut cmd = Command::new(&clang.path); - cmd.arg("-save-temps") - .arg("-E") - .arg("-C") - .arg("-c") - .arg(&wrapper_path) - .stdout(Stdio::piped()); - - for a in &self.options.clang_args { - cmd.arg(a); - } - - let mut child = cmd.spawn()?; - - let mut preprocessed = child.stdout.take().unwrap(); - let mut file = File::create(if is_cpp { - "__bindgen.ii" - } else { - "__bindgen.i" - })?; - io::copy(&mut preprocessed, &mut file)?; - - if child.wait()?.success() { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "clang exited with non-zero status", - )) - } - } - - /// Don't derive `PartialEq` for a given type. Regular - /// expressions are supported. - pub fn no_partialeq>(mut self, arg: T) -> Builder { - self.options.no_partialeq_types.insert(arg.into()); - self - } - - /// Don't derive `Copy` for a given type. Regular - /// expressions are supported. - pub fn no_copy>(mut self, arg: T) -> Self { - self.options.no_copy_types.insert(arg.into()); - self - } - - /// Don't derive `Debug` for a given type. Regular - /// expressions are supported. - pub fn no_debug>(mut self, arg: T) -> Self { - self.options.no_debug_types.insert(arg.into()); - self - } - - /// Don't derive/impl `Default` for a given type. Regular - /// expressions are supported. - pub fn no_default>(mut self, arg: T) -> Self { - self.options.no_default_types.insert(arg.into()); - self - } - - /// Don't derive `Hash` for a given type. Regular - /// expressions are supported. 
- pub fn no_hash>(mut self, arg: T) -> Builder { - self.options.no_hash_types.insert(arg.into()); - self - } - - /// Add `#[must_use]` for the given type. Regular - /// expressions are supported. - pub fn must_use_type>(mut self, arg: T) -> Builder { - self.options.must_use_types.insert(arg.into()); - self - } - - /// Set whether `arr[size]` should be treated as `*mut T` or `*mut [T; size]` (same for mut) - pub fn array_pointers_in_arguments(mut self, doit: bool) -> Self { - self.options.array_pointers_in_arguments = doit; - self - } - - /// Set the wasm import module name - pub fn wasm_import_module_name>( - mut self, - import_name: T, - ) -> Self { - self.options.wasm_import_module_name = Some(import_name.into()); - self - } - - /// Specify the dynamic library name if we are generating bindings for a shared library. - pub fn dynamic_library_name>( - mut self, - dynamic_library_name: T, - ) -> Self { - self.options.dynamic_library_name = Some(dynamic_library_name.into()); - self - } - - /// Require successful linkage for all routines in a shared library. - /// This allows us to optimize function calls by being able to safely assume function pointers - /// are valid. - pub fn dynamic_link_require_all(mut self, req: bool) -> Self { - self.options.dynamic_link_require_all = req; - self - } - - /// Generate bindings as `pub` only if the bound item is publically accessible by C++. - pub fn respect_cxx_access_specs(mut self, doit: bool) -> Self { - self.options.respect_cxx_access_specs = doit; - self - } - - /// Always translate enum integer types to native Rust integer types. - /// - /// This will result in enums having types such as `u32` and `i16` instead - /// of `c_uint` and `c_short`. Types for Rustified enums are always - /// translated. - pub fn translate_enum_integer_types(mut self, doit: bool) -> Self { - self.options.translate_enum_integer_types = doit; - self - } - - /// Generate types with C style naming. 
- /// - /// This will add prefixes to the generated type names. For example instead of a struct `A` we - /// will generate struct `struct_A`. Currently applies to structs, unions, and enums. - pub fn c_naming(mut self, doit: bool) -> Self { - self.options.c_naming = doit; - self - } -} - -/// Configuration options for generated bindings. -#[derive(Debug)] -struct BindgenOptions { - /// The set of types that have been blocklisted and should not appear - /// anywhere in the generated code. - blocklisted_types: RegexSet, - - /// The set of functions that have been blocklisted and should not appear - /// in the generated code. - blocklisted_functions: RegexSet, - - /// The set of items, regardless of item-type, that have been - /// blocklisted and should not appear in the generated code. - blocklisted_items: RegexSet, - - /// The set of files whose contents should be blocklisted and should not - /// appear in the generated code. - blocklisted_files: RegexSet, - - /// The set of types that should be treated as opaque structures in the - /// generated code. - opaque_types: RegexSet, - - /// The explicit rustfmt path. - rustfmt_path: Option, - - /// The path to which we should write a Makefile-syntax depfile (if any). - depfile: Option, - - /// The set of types that we should have bindings for in the generated - /// code. - /// - /// This includes all types transitively reachable from any type in this - /// set. One might think of allowlisted types/vars/functions as GC roots, - /// and the generated Rust code as including everything that gets marked. - allowlisted_types: RegexSet, - - /// Allowlisted functions. See docs for `allowlisted_types` for more. - allowlisted_functions: RegexSet, - - /// Allowlisted variables. See docs for `allowlisted_types` for more. 
- allowlisted_vars: RegexSet, - - /// The default style of code to generate for enums - default_enum_style: codegen::EnumVariation, - - /// The enum patterns to mark an enum as a bitfield - /// (newtype with bitwise operations). - bitfield_enums: RegexSet, - - /// The enum patterns to mark an enum as a newtype. - newtype_enums: RegexSet, - - /// The enum patterns to mark an enum as a Rust enum. - rustified_enums: RegexSet, - - /// The enum patterns to mark an enum as a non-exhaustive Rust enum. - rustified_non_exhaustive_enums: RegexSet, - - /// The enum patterns to mark an enum as a module of constants. - constified_enum_modules: RegexSet, - - /// The enum patterns to mark an enum as a set of constants. - constified_enums: RegexSet, - - /// The default type for C macro constants. - default_macro_constant_type: codegen::MacroTypeVariation, - - /// The default style of code to generate for typedefs. - default_alias_style: codegen::AliasVariation, - - /// Typedef patterns that will use regular type aliasing. - type_alias: RegexSet, - - /// Typedef patterns that will be aliased by creating a new struct. - new_type_alias: RegexSet, - - /// Typedef patterns that will be wrapped in a new struct and have - /// Deref and Deref to their aliased type. - new_type_alias_deref: RegexSet, - - /// Whether we should generate builtins or not. - builtins: bool, - - /// True if we should dump the Clang AST for debugging purposes. - emit_ast: bool, - - /// True if we should dump our internal IR for debugging purposes. - emit_ir: bool, - - /// Output graphviz dot file. - emit_ir_graphviz: Option, - - /// True if we should emulate C++ namespaces with Rust modules in the - /// generated bindings. - enable_cxx_namespaces: bool, - - /// True if we should try to find unexposed attributes in functions, in - /// order to be able to generate #[must_use] attributes in Rust. - enable_function_attribute_detection: bool, - - /// True if we should avoid mangling names with namespaces. 
- disable_name_namespacing: bool, - - /// True if we should avoid generating nested struct names. - disable_nested_struct_naming: bool, - - /// True if we should avoid embedding version identifiers into source code. - disable_header_comment: bool, - - /// True if we should generate layout tests for generated structures. - layout_tests: bool, - - /// True if we should implement the Debug trait for C/C++ structures and types - /// that do not support automatically deriving Debug. - impl_debug: bool, - - /// True if we should implement the PartialEq trait for C/C++ structures and types - /// that do not support automatically deriving PartialEq. - impl_partialeq: bool, - - /// True if we should derive Copy trait implementations for C/C++ structures - /// and types. - derive_copy: bool, - - /// True if we should derive Debug trait implementations for C/C++ structures - /// and types. - derive_debug: bool, - - /// True if we should derive Default trait implementations for C/C++ structures - /// and types. - derive_default: bool, - - /// True if we should derive Hash trait implementations for C/C++ structures - /// and types. - derive_hash: bool, - - /// True if we should derive PartialOrd trait implementations for C/C++ structures - /// and types. - derive_partialord: bool, - - /// True if we should derive Ord trait implementations for C/C++ structures - /// and types. - derive_ord: bool, - - /// True if we should derive PartialEq trait implementations for C/C++ structures - /// and types. - derive_partialeq: bool, - - /// True if we should derive Eq trait implementations for C/C++ structures - /// and types. - derive_eq: bool, - - /// True if we should avoid using libstd to use libcore instead. - use_core: bool, - - /// An optional prefix for the "raw" types, like `c_int`, `c_void`... - ctypes_prefix: Option, - - /// The prefix for the anon fields. - anon_fields_prefix: String, - - /// Whether to time the bindgen phases. 
- time_phases: bool, - - /// True if we should generate constant names that are **directly** under - /// namespaces. - namespaced_constants: bool, - - /// True if we should use MSVC name mangling rules. - msvc_mangling: bool, - - /// Whether we should convert float types to f32/f64 types. - convert_floats: bool, - - /// The set of raw lines to prepend to the top-level module of generated - /// Rust code. - raw_lines: Vec, - - /// The set of raw lines to prepend to each of the modules. - /// - /// This only makes sense if the `enable_cxx_namespaces` option is set. - module_lines: HashMap>, - - /// The set of arguments to pass straight through to Clang. - clang_args: Vec, - - /// The input header file. - input_header: Option, - - /// Any additional input header files. - extra_input_headers: Vec, - - /// Unsaved files for input. - input_unsaved_files: Vec, - - /// A user-provided visitor to allow customizing different kinds of - /// situations. - parse_callbacks: Option>, - - /// Which kind of items should we generate? By default, we'll generate all - /// of them. - codegen_config: CodegenConfig, - - /// Whether to treat inline namespaces conservatively. - /// - /// See the builder method description for more details. - conservative_inline_namespaces: bool, - - /// Whether to keep documentation comments in the generated output. See the - /// documentation for more details. Defaults to true. - generate_comments: bool, - - /// Whether to generate inline functions. Defaults to false. - generate_inline_functions: bool, - - /// Whether to allowlist types recursively. Defaults to true. 
- allowlist_recursively: bool, - - /// Instead of emitting 'use objc;' to files generated from objective c files, - /// generate '#[macro_use] extern crate objc;' - objc_extern_crate: bool, - - /// Instead of emitting 'use block;' to files generated from objective c files, - /// generate '#[macro_use] extern crate block;' - generate_block: bool, - - /// Instead of emitting 'use block;' to files generated from objective c files, - /// generate '#[macro_use] extern crate block;' - block_extern_crate: bool, - - /// Whether to use the clang-provided name mangling. This is true and - /// probably needed for C++ features. - /// - /// However, some old libclang versions seem to return incorrect results in - /// some cases for non-mangled functions, see [1], so we allow disabling it. - /// - /// [1]: https://github.com/rust-lang/rust-bindgen/issues/528 - enable_mangling: bool, - - /// Whether to detect include paths using clang_sys. - detect_include_paths: bool, - - /// Whether to try to fit macro constants into types smaller than u32/i32 - fit_macro_constants: bool, - - /// Whether to prepend the enum name to constant or newtype variants. - prepend_enum_name: bool, - - /// Version of the Rust compiler to target - rust_target: RustTarget, - - /// Features to enable, derived from `rust_target` - rust_features: RustFeatures, - - /// Whether we should record which items in the regex sets ever matched. - /// - /// This may be a bit slower, but will enable reporting of unused allowlist - /// items via the `error!` log. - record_matches: bool, - - /// Whether `size_t` should be translated to `usize` automatically. - size_t_is_usize: bool, - - /// Whether rustfmt should format the generated bindings. - rustfmt_bindings: bool, - - /// The absolute path to the rustfmt configuration file, if None, the standard rustfmt - /// options are used. - rustfmt_configuration_file: Option, - - /// The set of types that we should not derive `PartialEq` for. 
- no_partialeq_types: RegexSet, - - /// The set of types that we should not derive `Copy` for. - no_copy_types: RegexSet, - - /// The set of types that we should not derive `Debug` for. - no_debug_types: RegexSet, - - /// The set of types that we should not derive/impl `Default` for. - no_default_types: RegexSet, - - /// The set of types that we should not derive `Hash` for. - no_hash_types: RegexSet, - - /// The set of types that we should be annotated with `#[must_use]`. - must_use_types: RegexSet, - - /// Decide if C arrays should be regular pointers in rust or array pointers - array_pointers_in_arguments: bool, - - /// Wasm import module name. - wasm_import_module_name: Option, - - /// The name of the dynamic library (if we are generating bindings for a shared library). If - /// this is None, no dynamic bindings are created. - dynamic_library_name: Option, - - /// Require successful linkage for all routines in a shared library. - /// This allows us to optimize function calls by being able to safely assume function pointers - /// are valid. No effect if `dynamic_library_name` is None. - dynamic_link_require_all: bool, - - /// Only make generated bindings `pub` if the items would be publically accessible - /// by C++. - respect_cxx_access_specs: bool, - - /// Always translate enum integer types to native Rust integer types. - translate_enum_integer_types: bool, - - /// Generate types with C style naming. - c_naming: bool, - - /// Always output explicit padding fields - force_explicit_padding: bool, -} - -/// TODO(emilio): This is sort of a lie (see the error message that results from -/// removing this), but since we don't share references across panic boundaries -/// it's ok. 
-impl ::std::panic::UnwindSafe for BindgenOptions {} - -impl BindgenOptions { - fn build(&mut self) { - let mut regex_sets = [ - &mut self.allowlisted_vars, - &mut self.allowlisted_types, - &mut self.allowlisted_functions, - &mut self.blocklisted_types, - &mut self.blocklisted_functions, - &mut self.blocklisted_items, - &mut self.blocklisted_files, - &mut self.opaque_types, - &mut self.bitfield_enums, - &mut self.constified_enums, - &mut self.constified_enum_modules, - &mut self.newtype_enums, - &mut self.rustified_enums, - &mut self.rustified_non_exhaustive_enums, - &mut self.type_alias, - &mut self.new_type_alias, - &mut self.new_type_alias_deref, - &mut self.no_partialeq_types, - &mut self.no_copy_types, - &mut self.no_debug_types, - &mut self.no_default_types, - &mut self.no_hash_types, - &mut self.must_use_types, - ]; - let record_matches = self.record_matches; - for regex_set in &mut regex_sets { - regex_set.build(record_matches); - } - } - - /// Update rust target version - pub fn set_rust_target(&mut self, rust_target: RustTarget) { - self.rust_target = rust_target; - - // Keep rust_features synced with rust_target - self.rust_features = rust_target.into(); - } - - /// Get features supported by target Rust version - pub fn rust_features(&self) -> RustFeatures { - self.rust_features - } -} - -impl Default for BindgenOptions { - fn default() -> BindgenOptions { - let rust_target = RustTarget::default(); - - BindgenOptions { - rust_target, - rust_features: rust_target.into(), - blocklisted_types: Default::default(), - blocklisted_functions: Default::default(), - blocklisted_items: Default::default(), - blocklisted_files: Default::default(), - opaque_types: Default::default(), - rustfmt_path: Default::default(), - depfile: Default::default(), - allowlisted_types: Default::default(), - allowlisted_functions: Default::default(), - allowlisted_vars: Default::default(), - default_enum_style: Default::default(), - bitfield_enums: Default::default(), - newtype_enums: 
Default::default(), - rustified_enums: Default::default(), - rustified_non_exhaustive_enums: Default::default(), - constified_enums: Default::default(), - constified_enum_modules: Default::default(), - default_macro_constant_type: Default::default(), - default_alias_style: Default::default(), - type_alias: Default::default(), - new_type_alias: Default::default(), - new_type_alias_deref: Default::default(), - builtins: false, - emit_ast: false, - emit_ir: false, - emit_ir_graphviz: None, - layout_tests: true, - impl_debug: false, - impl_partialeq: false, - derive_copy: true, - derive_debug: true, - derive_default: false, - derive_hash: false, - derive_partialord: false, - derive_ord: false, - derive_partialeq: false, - derive_eq: false, - enable_cxx_namespaces: false, - enable_function_attribute_detection: false, - disable_name_namespacing: false, - disable_nested_struct_naming: false, - disable_header_comment: false, - use_core: false, - ctypes_prefix: None, - anon_fields_prefix: DEFAULT_ANON_FIELDS_PREFIX.into(), - namespaced_constants: true, - msvc_mangling: false, - convert_floats: true, - raw_lines: vec![], - module_lines: HashMap::default(), - clang_args: vec![], - input_header: None, - extra_input_headers: vec![], - input_unsaved_files: vec![], - parse_callbacks: None, - codegen_config: CodegenConfig::all(), - conservative_inline_namespaces: false, - generate_comments: true, - generate_inline_functions: false, - allowlist_recursively: true, - generate_block: false, - objc_extern_crate: false, - block_extern_crate: false, - enable_mangling: true, - detect_include_paths: true, - fit_macro_constants: false, - prepend_enum_name: true, - time_phases: false, - record_matches: true, - rustfmt_bindings: true, - size_t_is_usize: false, - rustfmt_configuration_file: None, - no_partialeq_types: Default::default(), - no_copy_types: Default::default(), - no_debug_types: Default::default(), - no_default_types: Default::default(), - no_hash_types: Default::default(), - 
must_use_types: Default::default(), - array_pointers_in_arguments: false, - wasm_import_module_name: None, - dynamic_library_name: None, - dynamic_link_require_all: false, - respect_cxx_access_specs: false, - translate_enum_integer_types: false, - c_naming: false, - force_explicit_padding: false, - } - } -} - -#[cfg(feature = "runtime")] -fn ensure_libclang_is_loaded() { - if clang_sys::is_loaded() { - return; - } - - // XXX (issue #350): Ensure that our dynamically loaded `libclang` - // doesn't get dropped prematurely, nor is loaded multiple times - // across different threads. - - lazy_static! { - static ref LIBCLANG: std::sync::Arc = { - clang_sys::load().expect("Unable to find libclang"); - clang_sys::get_library().expect( - "We just loaded libclang and it had better still be \ - here!", - ) - }; - } - - clang_sys::set_library(Some(LIBCLANG.clone())); -} - -#[cfg(not(feature = "runtime"))] -fn ensure_libclang_is_loaded() {} - -/// Generated Rust bindings. -#[derive(Debug)] -pub struct Bindings { - options: BindgenOptions, - module: proc_macro2::TokenStream, -} - -pub(crate) const HOST_TARGET: &str = - include_str!(concat!(env!("OUT_DIR"), "/host-target.txt")); - -// Some architecture triplets are different between rust and libclang, see #1211 -// and duplicates. -fn rust_to_clang_target(rust_target: &str) -> String { - if rust_target.starts_with("aarch64-apple-") { - let mut clang_target = "arm64-apple-".to_owned(); - clang_target - .push_str(rust_target.strip_prefix("aarch64-apple-").unwrap()); - return clang_target; - } - rust_target.to_owned() -} - -/// Returns the effective target, and whether it was explicitly specified on the -/// clang flags. 
-fn find_effective_target(clang_args: &[String]) -> (String, bool) { - let mut args = clang_args.iter(); - while let Some(opt) = args.next() { - if opt.starts_with("--target=") { - let mut split = opt.split('='); - split.next(); - return (split.next().unwrap().to_owned(), true); - } - - if opt == "-target" { - if let Some(target) = args.next() { - return (target.clone(), true); - } - } - } - - // If we're running from a build script, try to find the cargo target. - if let Ok(t) = env::var("TARGET") { - return (rust_to_clang_target(&t), false); - } - - (rust_to_clang_target(HOST_TARGET), false) -} - -impl Bindings { - /// Generate bindings for the given options. - pub(crate) fn generate( - mut options: BindgenOptions, - ) -> Result { - ensure_libclang_is_loaded(); - - #[cfg(feature = "runtime")] - debug!( - "Generating bindings, libclang at {}", - clang_sys::get_library().unwrap().path().display() - ); - #[cfg(not(feature = "runtime"))] - debug!("Generating bindings, libclang linked"); - - options.build(); - - let (effective_target, explicit_target) = - find_effective_target(&options.clang_args); - - let is_host_build = - rust_to_clang_target(HOST_TARGET) == effective_target; - - // NOTE: The is_host_build check wouldn't be sound normally in some - // cases if we were to call a binary (if you have a 32-bit clang and are - // building on a 64-bit system for example). But since we rely on - // opening libclang.so, it has to be the same architecture and thus the - // check is fine. - if !explicit_target && !is_host_build { - options - .clang_args - .insert(0, format!("--target={}", effective_target)); - }; - - fn detect_include_paths(options: &mut BindgenOptions) { - if !options.detect_include_paths { - return; - } - - // Filter out include paths and similar stuff, so we don't incorrectly - // promote them to `-isystem`. 
- let clang_args_for_clang_sys = { - let mut last_was_include_prefix = false; - options - .clang_args - .iter() - .filter(|arg| { - if last_was_include_prefix { - last_was_include_prefix = false; - return false; - } - - let arg = &**arg; - - // https://clang.llvm.org/docs/ClangCommandLineReference.html - // -isystem and -isystem-after are harmless. - if arg == "-I" || arg == "--include-directory" { - last_was_include_prefix = true; - return false; - } - - if arg.starts_with("-I") || - arg.starts_with("--include-directory=") - { - return false; - } - - true - }) - .cloned() - .collect::>() - }; - - debug!( - "Trying to find clang with flags: {:?}", - clang_args_for_clang_sys - ); - - let clang = match clang_sys::support::Clang::find( - None, - &clang_args_for_clang_sys, - ) { - None => return, - Some(clang) => clang, - }; - - debug!("Found clang: {:?}", clang); - - // Whether we are working with C or C++ inputs. - let is_cpp = args_are_cpp(&options.clang_args) || - options.input_header.as_deref().map_or(false, file_is_cpp); - - let search_paths = if is_cpp { - clang.cpp_search_paths - } else { - clang.c_search_paths - }; - - if let Some(search_paths) = search_paths { - for path in search_paths.into_iter() { - if let Ok(path) = path.into_os_string().into_string() { - options.clang_args.push("-isystem".to_owned()); - options.clang_args.push(path); - } - } - } - } - - detect_include_paths(&mut options); - - #[cfg(unix)] - fn can_read(perms: &std::fs::Permissions) -> bool { - use std::os::unix::fs::PermissionsExt; - perms.mode() & 0o444 > 0 - } - - #[cfg(not(unix))] - fn can_read(_: &std::fs::Permissions) -> bool { - true - } - - if let Some(h) = options.input_header.as_ref() { - if let Ok(md) = std::fs::metadata(h) { - if md.is_dir() { - eprintln!("error: '{}' is a folder", h); - return Err(()); - } - if !can_read(&md.permissions()) { - eprintln!( - "error: insufficient permissions to read '{}'", - h - ); - return Err(()); - } - options.clang_args.push(h.clone()) - } 
else { - eprintln!("error: header '{}' does not exist.", h); - return Err(()); - } - } - - for (idx, f) in options.input_unsaved_files.iter().enumerate() { - if idx != 0 || options.input_header.is_some() { - options.clang_args.push("-include".to_owned()); - } - options.clang_args.push(f.name.to_str().unwrap().to_owned()) - } - - debug!("Fixed-up options: {:?}", options); - - let time_phases = options.time_phases; - let mut context = BindgenContext::new(options); - - if is_host_build { - debug_assert_eq!( - context.target_pointer_size(), - std::mem::size_of::<*mut ()>(), - "{:?} {:?}", - effective_target, - HOST_TARGET - ); - } - - { - let _t = time::Timer::new("parse").with_output(time_phases); - parse(&mut context)?; - } - - let (items, options) = codegen::codegen(context); - - Ok(Bindings { - options, - module: quote! { - #( #items )* - }, - }) - } - - /// Write these bindings as source text to a file. - pub fn write_to_file>(&self, path: P) -> io::Result<()> { - let file = OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(path.as_ref())?; - self.write(Box::new(file))?; - Ok(()) - } - - /// Write these bindings as source text to the given `Write`able. 
- pub fn write<'a>(&self, mut writer: Box) -> io::Result<()> { - if !self.options.disable_header_comment { - let version = option_env!("CARGO_PKG_VERSION"); - let header = format!( - "/* automatically generated by rust-bindgen {} */\n\n", - version.unwrap_or("(unknown version)") - ); - writer.write_all(header.as_bytes())?; - } - - for line in self.options.raw_lines.iter() { - writer.write_all(line.as_bytes())?; - writer.write_all("\n".as_bytes())?; - } - - if !self.options.raw_lines.is_empty() { - writer.write_all("\n".as_bytes())?; - } - - let bindings = self.module.to_string(); - - match self.rustfmt_generated_string(&bindings) { - Ok(rustfmt_bindings) => { - writer.write_all(rustfmt_bindings.as_bytes())?; - } - Err(err) => { - eprintln!( - "Failed to run rustfmt: {} (non-fatal, continuing)", - err - ); - writer.write_all(bindings.as_bytes())?; - } - } - Ok(()) - } - - /// Gets the rustfmt path to rustfmt the generated bindings. - fn rustfmt_path(&self) -> io::Result> { - debug_assert!(self.options.rustfmt_bindings); - if let Some(ref p) = self.options.rustfmt_path { - return Ok(Cow::Borrowed(p)); - } - if let Ok(rustfmt) = env::var("RUSTFMT") { - return Ok(Cow::Owned(rustfmt.into())); - } - #[cfg(feature = "which-rustfmt")] - match which::which("rustfmt") { - Ok(p) => Ok(Cow::Owned(p)), - Err(e) => { - Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))) - } - } - #[cfg(not(feature = "which-rustfmt"))] - // No rustfmt binary was specified, so assume that the binary is called - // "rustfmt" and that it is in the user's PATH. 
- Ok(Cow::Owned("rustfmt".into())) - } - - /// Checks if rustfmt_bindings is set and runs rustfmt on the string - fn rustfmt_generated_string<'a>( - &self, - source: &'a str, - ) -> io::Result> { - let _t = time::Timer::new("rustfmt_generated_string") - .with_output(self.options.time_phases); - - if !self.options.rustfmt_bindings { - return Ok(Cow::Borrowed(source)); - } - - let rustfmt = self.rustfmt_path()?; - let mut cmd = Command::new(&*rustfmt); - - cmd.stdin(Stdio::piped()).stdout(Stdio::piped()); - - if let Some(path) = self - .options - .rustfmt_configuration_file - .as_ref() - .and_then(|f| f.to_str()) - { - cmd.args(&["--config-path", path]); - } - - let mut child = cmd.spawn()?; - let mut child_stdin = child.stdin.take().unwrap(); - let mut child_stdout = child.stdout.take().unwrap(); - - let source = source.to_owned(); - - // Write to stdin in a new thread, so that we can read from stdout on this - // thread. This keeps the child from blocking on writing to its stdout which - // might block us from writing to its stdin. 
- let stdin_handle = ::std::thread::spawn(move || { - let _ = child_stdin.write_all(source.as_bytes()); - source - }); - - let mut output = vec![]; - io::copy(&mut child_stdout, &mut output)?; - - let status = child.wait()?; - let source = stdin_handle.join().expect( - "The thread writing to rustfmt's stdin doesn't do \ - anything that could panic", - ); - - match String::from_utf8(output) { - Ok(bindings) => match status.code() { - Some(0) => Ok(Cow::Owned(bindings)), - Some(2) => Err(io::Error::new( - io::ErrorKind::Other, - "Rustfmt parsing errors.".to_string(), - )), - Some(3) => { - warn!("Rustfmt could not format some lines."); - Ok(Cow::Owned(bindings)) - } - _ => Err(io::Error::new( - io::ErrorKind::Other, - "Internal rustfmt error".to_string(), - )), - }, - _ => Ok(Cow::Owned(source)), - } - } -} - -impl std::fmt::Display for Bindings { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut bytes = vec![]; - self.write(Box::new(&mut bytes) as Box) - .expect("writing to a vec cannot fail"); - f.write_str( - std::str::from_utf8(&bytes) - .expect("we should only write bindings that are valid utf-8"), - ) - } -} - -/// Determines whether the given cursor is in any of the files matched by the -/// options. -fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool { - ctx.options().builtins || !cursor.is_builtin() -} - -/// Parse one `Item` from the Clang cursor. -fn parse_one( - ctx: &mut BindgenContext, - cursor: clang::Cursor, - parent: Option, -) -> clang_sys::CXChildVisitResult { - if !filter_builtins(ctx, &cursor) { - return CXChildVisit_Continue; - } - - use clang_sys::CXChildVisit_Continue; - match Item::parse(cursor, parent, ctx) { - Ok(..) => {} - Err(ParseError::Continue) => {} - Err(ParseError::Recurse) => { - cursor.visit(|child| parse_one(ctx, child, parent)); - } - } - CXChildVisit_Continue -} - -/// Parse the Clang AST into our `Item` internal representation. 
-fn parse(context: &mut BindgenContext) -> Result<(), ()> { - use clang_sys::*; - - let mut any_error = false; - for d in context.translation_unit().diags().iter() { - let msg = d.format(); - let is_err = d.severity() >= CXDiagnostic_Error; - eprintln!("{}, err: {}", msg, is_err); - any_error |= is_err; - } - - if any_error { - return Err(()); - } - - let cursor = context.translation_unit().cursor(); - - if context.options().emit_ast { - fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult { - if !cur.is_builtin() { - clang::ast_dump(cur, 0) - } else { - CXChildVisit_Continue - } - } - cursor.visit(|cur| dump_if_not_builtin(&cur)); - } - - let root = context.root_module(); - context.with_module(root, |context| { - cursor.visit(|cursor| parse_one(context, cursor, None)) - }); - - assert!( - context.current_module() == context.root_module(), - "How did this happen?" - ); - Ok(()) -} - -/// Extracted Clang version data -#[derive(Debug)] -pub struct ClangVersion { - /// Major and minor semver, if parsing was successful - pub parsed: Option<(u32, u32)>, - /// full version string - pub full: String, -} - -/// Get the major and the minor semver numbers of Clang's version -pub fn clang_version() -> ClangVersion { - ensure_libclang_is_loaded(); - - //Debian clang version 11.0.1-2 - let raw_v: String = clang::extract_clang_version(); - let split_v: Option> = raw_v - .split_whitespace() - .find(|t| t.chars().next().map_or(false, |v| v.is_ascii_digit())) - .map(|v| v.split('.').collect()); - if let Some(v) = split_v { - if v.len() >= 2 { - let maybe_major = v[0].parse::(); - let maybe_minor = v[1].parse::(); - if let (Ok(major), Ok(minor)) = (maybe_major, maybe_minor) { - return ClangVersion { - parsed: Some((major, minor)), - full: raw_v.clone(), - }; - } - } - }; - ClangVersion { - parsed: None, - full: raw_v.clone(), - } -} - -/// Looks for the env var `var_${TARGET}`, and falls back to just `var` when it is not found. 
-fn get_target_dependent_env_var(var: &str) -> Option { - if let Ok(target) = env::var("TARGET") { - if let Ok(v) = env::var(&format!("{}_{}", var, target)) { - return Some(v); - } - if let Ok(v) = - env::var(&format!("{}_{}", var, target.replace("-", "_"))) - { - return Some(v); - } - } - env::var(var).ok() -} - -/// A ParseCallbacks implementation that will act on file includes by echoing a rerun-if-changed -/// line -/// -/// When running inside a `build.rs` script, this can be used to make cargo invalidate the -/// generated bindings whenever any of the files included from the header change: -/// ``` -/// use bindgen::builder; -/// let bindings = builder() -/// .header("path/to/input/header") -/// .parse_callbacks(Box::new(bindgen::CargoCallbacks)) -/// .generate(); -/// ``` -#[derive(Debug)] -pub struct CargoCallbacks; - -impl callbacks::ParseCallbacks for CargoCallbacks { - fn include_file(&self, filename: &str) { - println!("cargo:rerun-if-changed={}", filename); - } -} - -/// Test command_line_flag function. 
-#[test] -fn commandline_flag_unit_test_function() { - //Test 1 - let bindings = crate::builder(); - let command_line_flags = bindings.command_line_flags(); - - let test_cases = vec![ - "--rust-target", - "--no-derive-default", - "--generate", - "functions,types,vars,methods,constructors,destructors", - ] - .iter() - .map(|&x| x.into()) - .collect::>(); - - assert!(test_cases - .iter() - .all(|ref x| command_line_flags.contains(x),)); - - //Test 2 - let bindings = crate::builder() - .header("input_header") - .allowlist_type("Distinct_Type") - .allowlist_function("safe_function"); - - let command_line_flags = bindings.command_line_flags(); - let test_cases = vec![ - "--rust-target", - "input_header", - "--no-derive-default", - "--generate", - "functions,types,vars,methods,constructors,destructors", - "--allowlist-type", - "Distinct_Type", - "--allowlist-function", - "safe_function", - ] - .iter() - .map(|&x| x.into()) - .collect::>(); - println!("{:?}", command_line_flags); - - assert!(test_cases - .iter() - .all(|ref x| command_line_flags.contains(x),)); -} - -#[test] -fn test_rust_to_clang_target() { - assert_eq!(rust_to_clang_target("aarch64-apple-ios"), "arm64-apple-ios"); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/log_stubs.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/log_stubs.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/log_stubs.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/log_stubs.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -#![allow(unused)] - -macro_rules! log { - (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{ - let _ = $target; - let _ = log!($lvl, $($arg)+); - }}; - ($lvl:expr, $($arg:tt)+) => {{ - let _ = $lvl; - let _ = format_args!($($arg)+); - }}; -} -macro_rules! 
error { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -macro_rules! warn { - (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) }; - ($($arg:tt)*) => { log!("", $($arg)*) }; -} -macro_rules! info { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -macro_rules! trace { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/main.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/main.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/main.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/main.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -extern crate bindgen; -#[cfg(feature = "logging")] -extern crate env_logger; -#[macro_use] -#[cfg(feature = "logging")] -extern crate log; -extern crate clap; - -use bindgen::clang_version; -use std::env; -use std::panic; - -#[macro_use] -#[cfg(not(feature = "logging"))] -mod log_stubs; - -mod options; -use crate::options::builder_from_flags; - -fn clang_version_check() { - let version = clang_version(); - let expected_version = if cfg!(feature = "testing_only_libclang_9") { - Some((9, 0)) - } else if cfg!(feature = "testing_only_libclang_5") { - Some((5, 0)) - } else if cfg!(feature = "testing_only_libclang_4") { - Some((4, 0)) - } else if cfg!(feature = "testing_only_libclang_3_9") { - Some((3, 9)) - } else { - None - }; - - info!( - "Clang Version: {}, parsed: {:?}", - version.full, version.parsed - ); - - if expected_version.is_some() { - // assert_eq!(version.parsed, 
version.parsed); - } -} - -pub fn main() { - #[cfg(feature = "logging")] - env_logger::init(); - - match builder_from_flags(env::args()) { - Ok((builder, output, verbose)) => { - clang_version_check(); - let builder_result = panic::catch_unwind(|| { - builder.generate().expect("Unable to generate bindings") - }); - - if builder_result.is_err() { - if verbose { - print_verbose_err(); - } - std::process::exit(1); - } - - let bindings = builder_result.unwrap(); - bindings.write(output).expect("Unable to write output"); - } - Err(error) => { - println!("{}", error); - std::process::exit(1); - } - }; -} - -fn print_verbose_err() { - println!("Bindgen unexpectedly panicked"); - println!( - "This may be caused by one of the known-unsupported \ - things (https://rust-lang.github.io/rust-bindgen/cpp.html), \ - please modify the bindgen flags to work around it as \ - described in https://rust-lang.github.io/rust-bindgen/cpp.html" - ); - println!( - "Otherwise, please file an issue at \ - https://github.com/rust-lang/rust-bindgen/issues/new" - ); -} - -#[cfg(test)] -mod test { - fn build_flags_output_helper(builder: &bindgen::Builder) { - let mut command_line_flags = builder.command_line_flags(); - command_line_flags.insert(0, "bindgen".to_string()); - - let flags_quoted: Vec = command_line_flags - .iter() - .map(|x| format!("{}", shlex::quote(x))) - .collect(); - let flags_str = flags_quoted.join(" "); - println!("{}", flags_str); - - let (builder, _output, _verbose) = - crate::options::builder_from_flags(command_line_flags.into_iter()) - .unwrap(); - builder.generate().expect("failed to generate bindings"); - } - - #[test] - fn commandline_multiple_headers() { - let bindings = bindgen::Builder::default() - .header("tests/headers/char.h") - .header("tests/headers/func_ptr.h") - .header("tests/headers/16-byte-alignment.h"); - build_flags_output_helper(&bindings); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/options.rs 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/options.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/options.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/options.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1000 +0,0 @@ -use bindgen::{ - builder, AliasVariation, Builder, CodegenConfig, EnumVariation, - MacroTypeVariation, RustTarget, DEFAULT_ANON_FIELDS_PREFIX, - RUST_TARGET_STRINGS, -}; -use clap::{App, Arg}; -use std::fs::File; -use std::io::{self, stderr, Error, ErrorKind, Write}; -use std::path::PathBuf; -use std::str::FromStr; - -/// Construct a new [`Builder`](./struct.Builder.html) from command line flags. -pub fn builder_from_flags( - args: I, -) -> Result<(Builder, Box, bool), io::Error> -where - I: Iterator, -{ - let rust_target_help = format!( - "Version of the Rust compiler to target. Valid options are: {:?}. Defaults to {:?}.", - RUST_TARGET_STRINGS, - String::from(RustTarget::default()) - ); - - let matches = App::new("bindgen") - .version(option_env!("CARGO_PKG_VERSION").unwrap_or("unknown")) - .about("Generates Rust bindings from C/C++ headers.") - .usage("bindgen [FLAGS] [OPTIONS]
-- ...") - .args(&[ - Arg::with_name("header") - .help("C or C++ header file") - .required(true), - Arg::with_name("depfile") - .long("depfile") - .takes_value(true) - .help("Path to write depfile to"), - Arg::with_name("default-enum-style") - .long("default-enum-style") - .help("The default style of code used to generate enums.") - .value_name("variant") - .default_value("consts") - .possible_values(&[ - "consts", - "moduleconsts", - "bitfield", - "newtype", - "rust", - "rust_non_exhaustive", - ]) - .multiple(false), - Arg::with_name("bitfield-enum") - .long("bitfield-enum") - .help( - "Mark any enum whose name matches as a set of \ - bitfield flags.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("newtype-enum") - .long("newtype-enum") - .help("Mark any enum whose name matches as a newtype.") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("rustified-enum") - .long("rustified-enum") - .help("Mark any enum whose name matches as a Rust enum.") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("constified-enum") - .long("constified-enum") - .help( - "Mark any enum whose name matches as a series of \ - constants.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("constified-enum-module") - .long("constified-enum-module") - .help( - "Mark any enum whose name matches as a module of \ - constants.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("default-macro-constant-type") - .long("default-macro-constant-type") - .help("The default signed/unsigned type for C macro constants.") - .value_name("variant") - .default_value("unsigned") - .possible_values(&["signed", "unsigned"]) - .multiple(false), - Arg::with_name("default-alias-style") - .long("default-alias-style") - .help("The default 
style of code used to generate typedefs.") - .value_name("variant") - .default_value("type_alias") - .possible_values(&[ - "type_alias", - "new_type", - "new_type_deref", - ]) - .multiple(false), - Arg::with_name("normal-alias") - .long("normal-alias") - .help( - "Mark any typedef alias whose name matches to use \ - normal type aliasing.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("new-type-alias") - .long("new-type-alias") - .help( - "Mark any typedef alias whose name matches to have \ - a new type generated for it.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("new-type-alias-deref") - .long("new-type-alias-deref") - .help( - "Mark any typedef alias whose name matches to have \ - a new type with Deref and DerefMut to the inner type.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("blocklist-type") - .alias("blacklist-type") - .long("blocklist-type") - .help("Mark as hidden.") - .value_name("type") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("blocklist-function") - .alias("blacklist-function") - .long("blocklist-function") - .help("Mark as hidden.") - .value_name("function") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("blocklist-item") - .alias("blacklist-item") - .long("blocklist-item") - .help("Mark as hidden.") - .value_name("item") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("blocklist-file") - .alias("blacklist-file") - .long("blocklist-file") - .help("Mark all contents of as hidden.") - .value_name("path") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("no-layout-tests") - .long("no-layout-tests") - .help("Avoid generating layout tests for any type."), - Arg::with_name("no-derive-copy") - .long("no-derive-copy") - .help("Avoid 
deriving Copy on any type."), - Arg::with_name("no-derive-debug") - .long("no-derive-debug") - .help("Avoid deriving Debug on any type."), - Arg::with_name("no-derive-default") - .long("no-derive-default") - .hidden(true) - .help("Avoid deriving Default on any type."), - Arg::with_name("impl-debug").long("impl-debug").help( - "Create Debug implementation, if it can not be derived \ - automatically.", - ), - Arg::with_name("impl-partialeq") - .long("impl-partialeq") - .help( - "Create PartialEq implementation, if it can not be derived \ - automatically.", - ), - Arg::with_name("with-derive-default") - .long("with-derive-default") - .help("Derive Default on any type."), - Arg::with_name("with-derive-hash") - .long("with-derive-hash") - .help("Derive hash on any type."), - Arg::with_name("with-derive-partialeq") - .long("with-derive-partialeq") - .help("Derive partialeq on any type."), - Arg::with_name("with-derive-partialord") - .long("with-derive-partialord") - .help("Derive partialord on any type."), - Arg::with_name("with-derive-eq") - .long("with-derive-eq") - .help( - "Derive eq on any type. Enable this option also \ - enables --with-derive-partialeq", - ), - Arg::with_name("with-derive-ord") - .long("with-derive-ord") - .help( - "Derive ord on any type. Enable this option also \ - enables --with-derive-partialord", - ), - Arg::with_name("no-doc-comments") - .long("no-doc-comments") - .help( - "Avoid including doc comments in the output, see: \ - https://github.com/rust-lang/rust-bindgen/issues/426", - ), - Arg::with_name("no-recursive-allowlist") - .long("no-recursive-allowlist") - .alias("no-recursive-whitelist") - .help( - "Disable allowlisting types recursively. This will cause \ - bindgen to emit Rust code that won't compile! 
See the \ - `bindgen::Builder::allowlist_recursively` method's \ - documentation for details.", - ), - Arg::with_name("objc-extern-crate") - .long("objc-extern-crate") - .help("Use extern crate instead of use for objc."), - Arg::with_name("generate-block") - .long("generate-block") - .help("Generate block signatures instead of void pointers."), - Arg::with_name("block-extern-crate") - .long("block-extern-crate") - .help("Use extern crate instead of use for block."), - Arg::with_name("distrust-clang-mangling") - .long("distrust-clang-mangling") - .help("Do not trust the libclang-provided mangling"), - Arg::with_name("builtins").long("builtins").help( - "Output bindings for builtin definitions, e.g. \ - __builtin_va_list.", - ), - Arg::with_name("ctypes-prefix") - .long("ctypes-prefix") - .help( - "Use the given prefix before raw types instead of \ - ::std::os::raw.", - ) - .value_name("prefix") - .takes_value(true), - Arg::with_name("anon-fields-prefix") - .long("anon-fields-prefix") - .help("Use the given prefix for the anon fields.") - .value_name("prefix") - .default_value(DEFAULT_ANON_FIELDS_PREFIX) - .takes_value(true), - Arg::with_name("time-phases") - .long("time-phases") - .help("Time the different bindgen phases and print to stderr"), - // All positional arguments after the end of options marker, `--` - Arg::with_name("clang-args").last(true).multiple(true), - Arg::with_name("emit-clang-ast") - .long("emit-clang-ast") - .help("Output the Clang AST for debugging purposes."), - Arg::with_name("emit-ir") - .long("emit-ir") - .help("Output our internal IR for debugging purposes."), - Arg::with_name("emit-ir-graphviz") - .long("emit-ir-graphviz") - .help("Dump graphviz dot file.") - .value_name("path") - .takes_value(true), - Arg::with_name("enable-cxx-namespaces") - .long("enable-cxx-namespaces") - .help("Enable support for C++ namespaces."), - Arg::with_name("disable-name-namespacing") - .long("disable-name-namespacing") - .help( - "Disable namespacing via 
mangling, causing bindgen to \ - generate names like \"Baz\" instead of \"foo_bar_Baz\" \ - for an input name \"foo::bar::Baz\".", - ), - Arg::with_name("disable-nested-struct-naming") - .long("disable-nested-struct-naming") - .help( - "Disable nested struct naming, causing bindgen to generate \ - names like \"bar\" instead of \"foo_bar\" for a nested \ - definition \"struct foo { struct bar { } b; };\"." - ), - Arg::with_name("disable-untagged-union") - .long("disable-untagged-union") - .help( - "Disable support for native Rust unions.", - ), - Arg::with_name("disable-header-comment") - .long("disable-header-comment") - .help("Suppress insertion of bindgen's version identifier into generated bindings.") - .multiple(true), - Arg::with_name("ignore-functions") - .long("ignore-functions") - .help( - "Do not generate bindings for functions or methods. This \ - is useful when you only care about struct layouts.", - ), - Arg::with_name("generate") - .long("generate") - .help( - "Generate only given items, split by commas. 
\ - Valid values are \"functions\",\"types\", \"vars\", \ - \"methods\", \"constructors\" and \"destructors\".", - ) - .takes_value(true), - Arg::with_name("ignore-methods") - .long("ignore-methods") - .help("Do not generate bindings for methods."), - Arg::with_name("no-convert-floats") - .long("no-convert-floats") - .help("Do not automatically convert floats to f32/f64."), - Arg::with_name("no-prepend-enum-name") - .long("no-prepend-enum-name") - .help("Do not prepend the enum name to constant or newtype variants."), - Arg::with_name("no-include-path-detection") - .long("no-include-path-detection") - .help("Do not try to detect default include paths"), - Arg::with_name("fit-macro-constant-types") - .long("fit-macro-constant-types") - .help("Try to fit macro constants into types smaller than u32/i32"), - Arg::with_name("unstable-rust") - .long("unstable-rust") - .help("Generate unstable Rust code (deprecated; use --rust-target instead).") - .multiple(true), // FIXME: Pass legacy test suite - Arg::with_name("opaque-type") - .long("opaque-type") - .help("Mark as opaque.") - .value_name("type") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("output") - .short("o") - .long("output") - .help("Write Rust bindings to .") - .takes_value(true), - Arg::with_name("raw-line") - .long("raw-line") - .help("Add a raw line of Rust code at the beginning of output.") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("module-raw-line") - .long("module-raw-line") - .help("Add a raw line of Rust code to a given module.") - .takes_value(true) - .multiple(true) - .number_of_values(2) - .value_names(&["module-name", "raw-line"]), - Arg::with_name("rust-target") - .long("rust-target") - .help(&rust_target_help) - .takes_value(true), - Arg::with_name("use-core") - .long("use-core") - .help("Use types from Rust core instead of std."), - Arg::with_name("conservative-inline-namespaces") - .long("conservative-inline-namespaces") - 
.help( - "Conservatively generate inline namespaces to avoid name \ - conflicts.", - ), - Arg::with_name("use-msvc-mangling") - .long("use-msvc-mangling") - .help("MSVC C++ ABI mangling. DEPRECATED: Has no effect."), - Arg::with_name("allowlist-function") - .long("allowlist-function") - .alias("whitelist-function") - .help( - "Allowlist all the free-standing functions matching \ - . Other non-allowlisted functions will not be \ - generated.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("generate-inline-functions") - .long("generate-inline-functions") - .help("Generate inline functions."), - Arg::with_name("allowlist-type") - .long("allowlist-type") - .alias("whitelist-type") - .help( - "Only generate types matching . Other non-allowlisted types will \ - not be generated.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("allowlist-var") - .long("allowlist-var") - .alias("whitelist-var") - .help( - "Allowlist all the free-standing variables matching \ - . Other non-allowlisted variables will not be \ - generated.", - ) - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("verbose") - .long("verbose") - .help("Print verbose error messages."), - Arg::with_name("dump-preprocessed-input") - .long("dump-preprocessed-input") - .help( - "Preprocess and dump the input header files to disk. \ - Useful when debugging bindgen, using C-Reduce, or when \ - filing issues. The resulting file will be named \ - something like `__bindgen.i` or `__bindgen.ii`.", - ), - Arg::with_name("no-record-matches") - .long("no-record-matches") - .help( - "Do not record matching items in the regex sets. 
\ - This disables reporting of unused items.", - ), - Arg::with_name("size_t-is-usize") - .long("size_t-is-usize") - .help("Translate size_t to usize."), - Arg::with_name("no-rustfmt-bindings") - .long("no-rustfmt-bindings") - .help("Do not format the generated bindings with rustfmt."), - Arg::with_name("rustfmt-bindings") - .long("rustfmt-bindings") - .help( - "Format the generated bindings with rustfmt. DEPRECATED: \ - --rustfmt-bindings is now enabled by default. Disable \ - with --no-rustfmt-bindings.", - ), - Arg::with_name("rustfmt-configuration-file") - .long("rustfmt-configuration-file") - .help( - "The absolute path to the rustfmt configuration file. \ - The configuration file will be used for formatting the bindings. \ - This parameter is incompatible with --no-rustfmt-bindings.", - ) - .value_name("path") - .takes_value(true) - .multiple(false) - .number_of_values(1), - Arg::with_name("no-partialeq") - .long("no-partialeq") - .help("Avoid deriving PartialEq for types matching .") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("no-copy") - .long("no-copy") - .help("Avoid deriving Copy for types matching .") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("no-debug") - .long("no-debug") - .help("Avoid deriving Debug for types matching .") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("no-default") - .long("no-default") - .help("Avoid deriving/implement Default for types matching .") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("no-hash") - .long("no-hash") - .help("Avoid deriving Hash for types matching .") - .value_name("regex") - .takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("must-use-type") - .long("must-use-type") - .help("Add #[must_use] annotation to types matching .") - .value_name("regex") - 
.takes_value(true) - .multiple(true) - .number_of_values(1), - Arg::with_name("enable-function-attribute-detection") - .long("enable-function-attribute-detection") - .help( - "Enables detecting unexposed attributes in functions (slow). - Used to generate #[must_use] annotations.", - ), - Arg::with_name("use-array-pointers-in-arguments") - .long("use-array-pointers-in-arguments") - .help("Use `*const [T; size]` instead of `*const T` for C arrays"), - Arg::with_name("wasm-import-module-name") - .long("wasm-import-module-name") - .value_name("name") - .takes_value(true) - .help("The name to be used in a #[link(wasm_import_module = ...)] statement"), - Arg::with_name("dynamic-loading") - .long("dynamic-loading") - .takes_value(true) - .help("Use dynamic loading mode with the given library name."), - Arg::with_name("dynamic-link-require-all") - .long("dynamic-link-require-all") - .help("Require successful linkage to all functions in the library."), - Arg::with_name("respect-cxx-access-specs") - .long("respect-cxx-access-specs") - .help("Makes generated bindings `pub` only for items if the items are publically accessible in C++."), - Arg::with_name("translate-enum-integer-types") - .long("translate-enum-integer-types") - .help("Always translate enum integer types to native Rust integer types."), - Arg::with_name("c-naming") - .long("c-naming") - .help("Generate types with C style naming."), - Arg::with_name("explicit-padding") - .long("explicit-padding") - .help("Always output explicit padding fields."), - ]) // .args() - .get_matches_from(args); - - let mut builder = builder(); - - if let Some(header) = matches.value_of("header") { - builder = builder.header(header); - } else { - return Err(Error::new(ErrorKind::Other, "Header not found")); - } - - if matches.is_present("unstable-rust") { - builder = builder.rust_target(RustTarget::Nightly); - writeln!( - &mut stderr(), - "warning: the `--unstable-rust` option is deprecated" - ) - .expect("Unable to write error 
message"); - } - - if let Some(rust_target) = matches.value_of("rust-target") { - builder = builder.rust_target(RustTarget::from_str(rust_target)?); - } - - if let Some(variant) = matches.value_of("default-enum-style") { - builder = builder.default_enum_style(EnumVariation::from_str(variant)?) - } - - if let Some(bitfields) = matches.values_of("bitfield-enum") { - for regex in bitfields { - builder = builder.bitfield_enum(regex); - } - } - - if let Some(newtypes) = matches.values_of("newtype-enum") { - for regex in newtypes { - builder = builder.newtype_enum(regex); - } - } - - if let Some(rustifieds) = matches.values_of("rustified-enum") { - for regex in rustifieds { - builder = builder.rustified_enum(regex); - } - } - - if let Some(const_enums) = matches.values_of("constified-enum") { - for regex in const_enums { - builder = builder.constified_enum(regex); - } - } - - if let Some(constified_mods) = matches.values_of("constified-enum-module") { - for regex in constified_mods { - builder = builder.constified_enum_module(regex); - } - } - - if let Some(variant) = matches.value_of("default-macro-constant-type") { - builder = builder - .default_macro_constant_type(MacroTypeVariation::from_str(variant)?) 
- } - - if let Some(variant) = matches.value_of("default-alias-style") { - builder = - builder.default_alias_style(AliasVariation::from_str(variant)?); - } - - if let Some(type_alias) = matches.values_of("normal-alias") { - for regex in type_alias { - builder = builder.type_alias(regex); - } - } - - if let Some(new_type) = matches.values_of("new-type-alias") { - for regex in new_type { - builder = builder.new_type_alias(regex); - } - } - - if let Some(new_type_deref) = matches.values_of("new-type-alias-deref") { - for regex in new_type_deref { - builder = builder.new_type_alias_deref(regex); - } - } - - if let Some(hidden_types) = matches.values_of("blocklist-type") { - for ty in hidden_types { - builder = builder.blocklist_type(ty); - } - } - - if let Some(hidden_functions) = matches.values_of("blocklist-function") { - for fun in hidden_functions { - builder = builder.blocklist_function(fun); - } - } - - if let Some(hidden_identifiers) = matches.values_of("blocklist-item") { - for id in hidden_identifiers { - builder = builder.blocklist_item(id); - } - } - - if let Some(hidden_files) = matches.values_of("blocklist-file") { - for file in hidden_files { - builder = builder.blocklist_file(file); - } - } - - if matches.is_present("builtins") { - builder = builder.emit_builtins(); - } - - if matches.is_present("no-layout-tests") { - builder = builder.layout_tests(false); - } - - if matches.is_present("no-derive-copy") { - builder = builder.derive_copy(false); - } - - if matches.is_present("no-derive-debug") { - builder = builder.derive_debug(false); - } - - if matches.is_present("impl-debug") { - builder = builder.impl_debug(true); - } - - if matches.is_present("impl-partialeq") { - builder = builder.impl_partialeq(true); - } - - if matches.is_present("with-derive-default") { - builder = builder.derive_default(true); - } - - if matches.is_present("with-derive-hash") { - builder = builder.derive_hash(true); - } - - if matches.is_present("with-derive-partialeq") { - 
builder = builder.derive_partialeq(true); - } - - if matches.is_present("with-derive-partialord") { - builder = builder.derive_partialord(true); - } - - if matches.is_present("with-derive-eq") { - builder = builder.derive_eq(true); - } - - if matches.is_present("with-derive-ord") { - builder = builder.derive_ord(true); - } - - if matches.is_present("no-derive-default") { - builder = builder.derive_default(false); - } - - if matches.is_present("no-prepend-enum-name") { - builder = builder.prepend_enum_name(false); - } - - if matches.is_present("no-include-path-detection") { - builder = builder.detect_include_paths(false); - } - - if matches.is_present("fit-macro-constant-types") { - builder = builder.fit_macro_constants(true); - } - - if matches.is_present("time-phases") { - builder = builder.time_phases(true); - } - - if matches.is_present("use-array-pointers-in-arguments") { - builder = builder.array_pointers_in_arguments(true); - } - - if let Some(wasm_import_name) = matches.value_of("wasm-import-module-name") - { - builder = builder.wasm_import_module_name(wasm_import_name); - } - - if let Some(prefix) = matches.value_of("ctypes-prefix") { - builder = builder.ctypes_prefix(prefix); - } - - if let Some(prefix) = matches.value_of("anon-fields-prefix") { - builder = builder.anon_fields_prefix(prefix); - } - - if let Some(what_to_generate) = matches.value_of("generate") { - let mut config = CodegenConfig::empty(); - for what in what_to_generate.split(',') { - match what { - "functions" => config.insert(CodegenConfig::FUNCTIONS), - "types" => config.insert(CodegenConfig::TYPES), - "vars" => config.insert(CodegenConfig::VARS), - "methods" => config.insert(CodegenConfig::METHODS), - "constructors" => config.insert(CodegenConfig::CONSTRUCTORS), - "destructors" => config.insert(CodegenConfig::DESTRUCTORS), - otherwise => { - return Err(Error::new( - ErrorKind::Other, - format!("Unknown generate item: {}", otherwise), - )); - } - } - } - builder = 
builder.with_codegen_config(config); - } - - if matches.is_present("emit-clang-ast") { - builder = builder.emit_clang_ast(); - } - - if matches.is_present("emit-ir") { - builder = builder.emit_ir(); - } - - if let Some(path) = matches.value_of("emit-ir-graphviz") { - builder = builder.emit_ir_graphviz(path); - } - - if matches.is_present("enable-cxx-namespaces") { - builder = builder.enable_cxx_namespaces(); - } - - if matches.is_present("enable-function-attribute-detection") { - builder = builder.enable_function_attribute_detection(); - } - - if matches.is_present("disable-name-namespacing") { - builder = builder.disable_name_namespacing(); - } - - if matches.is_present("disable-nested-struct-naming") { - builder = builder.disable_nested_struct_naming(); - } - - if matches.is_present("disable-untagged-union") { - builder = builder.disable_untagged_union(); - } - - if matches.is_present("disable-header-comment") { - builder = builder.disable_header_comment(); - } - - if matches.is_present("ignore-functions") { - builder = builder.ignore_functions(); - } - - if matches.is_present("ignore-methods") { - builder = builder.ignore_methods(); - } - - if matches.is_present("no-convert-floats") { - builder = builder.no_convert_floats(); - } - - if matches.is_present("no-doc-comments") { - builder = builder.generate_comments(false); - } - - if matches.is_present("no-recursive-allowlist") { - builder = builder.allowlist_recursively(false); - } - - if matches.is_present("objc-extern-crate") { - builder = builder.objc_extern_crate(true); - } - - if matches.is_present("generate-block") { - builder = builder.generate_block(true); - } - - if matches.is_present("block-extern-crate") { - builder = builder.block_extern_crate(true); - } - - if let Some(opaque_types) = matches.values_of("opaque-type") { - for ty in opaque_types { - builder = builder.opaque_type(ty); - } - } - - if let Some(lines) = matches.values_of("raw-line") { - for line in lines { - builder = 
builder.raw_line(line); - } - } - - if let Some(mut values) = matches.values_of("module-raw-line") { - while let Some(module) = values.next() { - let line = values.next().unwrap(); - builder = builder.module_raw_line(module, line); - } - } - - if matches.is_present("use-core") { - builder = builder.use_core(); - } - - if matches.is_present("distrust-clang-mangling") { - builder = builder.trust_clang_mangling(false); - } - - if matches.is_present("conservative-inline-namespaces") { - builder = builder.conservative_inline_namespaces(); - } - - if matches.is_present("generate-inline-functions") { - builder = builder.generate_inline_functions(true); - } - - if let Some(allowlist) = matches.values_of("allowlist-function") { - for regex in allowlist { - builder = builder.allowlist_function(regex); - } - } - - if let Some(allowlist) = matches.values_of("allowlist-type") { - for regex in allowlist { - builder = builder.allowlist_type(regex); - } - } - - if let Some(allowlist) = matches.values_of("allowlist-var") { - for regex in allowlist { - builder = builder.allowlist_var(regex); - } - } - - if let Some(args) = matches.values_of("clang-args") { - for arg in args { - builder = builder.clang_arg(arg); - } - } - - let output = if let Some(path) = matches.value_of("output") { - let file = File::create(path)?; - if let Some(depfile) = matches.value_of("depfile") { - builder = builder.depfile(path, depfile); - } - Box::new(io::BufWriter::new(file)) as Box - } else { - if let Some(depfile) = matches.value_of("depfile") { - builder = builder.depfile("-", depfile); - } - Box::new(io::BufWriter::new(io::stdout())) as Box - }; - - if matches.is_present("dump-preprocessed-input") { - builder.dump_preprocessed_input()?; - } - - if matches.is_present("no-record-matches") { - builder = builder.record_matches(false); - } - - if matches.is_present("size_t-is-usize") { - builder = builder.size_t_is_usize(true); - } - - let no_rustfmt_bindings = matches.is_present("no-rustfmt-bindings"); - 
if no_rustfmt_bindings { - builder = builder.rustfmt_bindings(false); - } - - if let Some(path_str) = matches.value_of("rustfmt-configuration-file") { - let path = PathBuf::from(path_str); - - if no_rustfmt_bindings { - return Err(Error::new( - ErrorKind::Other, - "Cannot supply both --rustfmt-configuration-file and --no-rustfmt-bindings", - )); - } - - if !path.is_absolute() { - return Err(Error::new( - ErrorKind::Other, - "--rustfmt-configuration--file needs to be an absolute path!", - )); - } - - if path.to_str().is_none() { - return Err(Error::new( - ErrorKind::Other, - "--rustfmt-configuration-file contains non-valid UTF8 characters.", - )); - } - - builder = builder.rustfmt_configuration_file(Some(path)); - } - - if let Some(no_partialeq) = matches.values_of("no-partialeq") { - for regex in no_partialeq { - builder = builder.no_partialeq(regex); - } - } - - if let Some(no_copy) = matches.values_of("no-copy") { - for regex in no_copy { - builder = builder.no_copy(regex); - } - } - - if let Some(no_debug) = matches.values_of("no-debug") { - for regex in no_debug { - builder = builder.no_debug(regex); - } - } - - if let Some(no_default) = matches.values_of("no-default") { - for regex in no_default { - builder = builder.no_default(regex); - } - } - - if let Some(no_hash) = matches.values_of("no-hash") { - for regex in no_hash { - builder = builder.no_hash(regex); - } - } - - if let Some(must_use_type) = matches.values_of("must-use-type") { - for regex in must_use_type { - builder = builder.must_use_type(regex); - } - } - - if let Some(dynamic_library_name) = matches.value_of("dynamic-loading") { - builder = builder.dynamic_library_name(dynamic_library_name); - } - - if matches.is_present("dynamic-link-require-all") { - builder = builder.dynamic_link_require_all(true); - } - - if matches.is_present("respect-cxx-access-specs") { - builder = builder.respect_cxx_access_specs(true); - } - - if matches.is_present("translate-enum-integer-types") { - builder = 
builder.translate_enum_integer_types(true); - } - - if matches.is_present("c-naming") { - builder = builder.c_naming(true); - } - - if matches.is_present("explicit-padding") { - builder = builder.explicit_padding(true); - } - - let verbose = matches.is_present("verbose"); - - Ok((builder, output, verbose)) -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/parse.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/parse.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/parse.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/parse.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -//! Common traits and types related to parsing our IR from Clang cursors. - -use crate::clang; -use crate::ir::context::{BindgenContext, ItemId, TypeId}; -use crate::ir::ty::TypeKind; - -/// Not so much an error in the traditional sense, but a control flow message -/// when walking over Clang's AST with a cursor. -#[derive(Debug)] -pub enum ParseError { - /// Recurse down the current AST node's children. - Recurse, - /// Continue on to the next sibling AST node, or back up to the parent's - /// siblings if we've exhausted all of this node's siblings (and so on). - Continue, -} - -/// The result of parsing a Clang AST node. -#[derive(Debug)] -pub enum ParseResult { - /// We've already resolved this item before, here is the extant `ItemId` for - /// it. - AlreadyResolved(ItemId), - - /// This is a newly parsed item. If the cursor is `Some`, it points to the - /// AST node where the new `T` was declared. - New(T, Option), -} - -/// An intermediate representation "sub-item" (i.e. one of the types contained -/// inside an `ItemKind` variant) that can be parsed from a Clang cursor. -pub trait ClangSubItemParser: Sized { - /// Attempt to parse this type from the given cursor. 
- /// - /// The fact that is a reference guarantees it's held by the context, and - /// allow returning already existing types. - fn parse( - cursor: clang::Cursor, - context: &mut BindgenContext, - ) -> Result, ParseError>; -} - -/// An intermediate representation item that can be parsed from a Clang cursor. -pub trait ClangItemParser: Sized { - /// Parse this item from the given Clang cursor. - fn parse( - cursor: clang::Cursor, - parent: Option, - context: &mut BindgenContext, - ) -> Result; - - /// Parse this item from the given Clang type. - fn from_ty( - ty: &clang::Type, - location: clang::Cursor, - parent: Option, - ctx: &mut BindgenContext, - ) -> Result; - - /// Identical to `from_ty`, but use the given `id` as the `ItemId` for the - /// newly parsed item. - fn from_ty_with_id( - id: ItemId, - ty: &clang::Type, - location: clang::Cursor, - parent: Option, - ctx: &mut BindgenContext, - ) -> Result; - - /// Parse this item from the given Clang type, or if we haven't resolved all - /// the other items this one depends on, an unresolved reference. - fn from_ty_or_ref( - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - context: &mut BindgenContext, - ) -> TypeId; - - /// Identical to `from_ty_or_ref`, but use the given `potential_id` as the - /// `ItemId` for the newly parsed item. - fn from_ty_or_ref_with_id( - potential_id: ItemId, - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - context: &mut BindgenContext, - ) -> TypeId; - - /// Create a named template type. - fn type_param( - with_id: Option, - location: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Option; - - /// Create a builtin type. 
- fn builtin_type( - kind: TypeKind, - is_const: bool, - context: &mut BindgenContext, - ) -> TypeId; -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/regex_set.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/regex_set.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/regex_set.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/regex_set.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -//! A type that represents the union of a set of regular expressions. - -use regex::RegexSet as RxSet; -use std::cell::Cell; - -/// A dynamic set of regular expressions. -#[derive(Debug, Default)] -pub struct RegexSet { - items: Vec, - /// Whether any of the items in the set was ever matched. The length of this - /// vector is exactly the length of `items`. - matched: Vec>, - set: Option, - /// Whether we should record matching items in the `matched` vector or not. - record_matches: bool, -} - -impl RegexSet { - /// Is this set empty? - pub fn is_empty(&self) -> bool { - self.items.is_empty() - } - - /// Insert a new regex into this set. - pub fn insert(&mut self, string: S) - where - S: AsRef, - { - self.items.push(string.as_ref().to_owned()); - self.matched.push(Cell::new(false)); - self.set = None; - } - - /// Returns slice of String from its field 'items' - pub fn get_items(&self) -> &[String] { - &self.items[..] - } - - /// Returns an iterator over regexes in the set which didn't match any - /// strings yet. - pub fn unmatched_items(&self) -> impl Iterator { - self.items.iter().enumerate().filter_map(move |(i, item)| { - if !self.record_matches || self.matched[i].get() { - return None; - } - - Some(item) - }) - } - - /// Construct a RegexSet from the set of entries we've accumulated. - /// - /// Must be called before calling `matches()`, or it will always return - /// false. 
- pub fn build(&mut self, record_matches: bool) { - let items = self.items.iter().map(|item| format!("^{}$", item)); - self.record_matches = record_matches; - self.set = match RxSet::new(items) { - Ok(x) => Some(x), - Err(e) => { - warn!("Invalid regex in {:?}: {:?}", self.items, e); - None - } - } - } - - /// Does the given `string` match any of the regexes in this set? - pub fn matches(&self, string: S) -> bool - where - S: AsRef, - { - let s = string.as_ref(); - let set = match self.set { - Some(ref set) => set, - None => return false, - }; - - if !self.record_matches { - return set.is_match(s); - } - - let matches = set.matches(s); - if !matches.matched_any() { - return false; - } - for i in matches.iter() { - self.matched[i].set(true); - } - - true - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/time.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/time.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/time.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/time.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -use std::io::{self, Write}; -use std::time::{Duration, Instant}; - -/// RAII timer to measure how long phases take. -#[derive(Debug)] -pub struct Timer<'a> { - output: bool, - name: &'a str, - start: Instant, -} - -impl<'a> Timer<'a> { - /// Creates a Timer with the given name, and starts it. By default, - /// will print to stderr when it is `drop`'d - pub fn new(name: &'a str) -> Self { - Timer { - output: true, - name, - start: Instant::now(), - } - } - - /// Sets whether or not the Timer will print a message - /// when it is dropped. 
- pub fn with_output(mut self, output: bool) -> Self { - self.output = output; - self - } - - /// Returns the time elapsed since the timer's creation - pub fn elapsed(&self) -> Duration { - Instant::now() - self.start - } - - fn print_elapsed(&mut self) { - if self.output { - let elapsed = self.elapsed(); - let time = (elapsed.as_secs() as f64) * 1e3 + - (elapsed.subsec_nanos() as f64) / 1e6; - let stderr = io::stderr(); - // Arbitrary output format, subject to change. - writeln!(stderr.lock(), " time: {:>9.3} ms.\t{}", time, self.name) - .expect("timer write should not fail"); - } - } -} - -impl<'a> Drop for Timer<'a> { - fn drop(&mut self) { - self.print_elapsed(); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/time.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/time.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/time.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/time.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,52 @@ +use std::io::{self, Write}; +use std::time::{Duration, Instant}; + +/// RAII timer to measure how long phases take. +#[derive(Debug)] +pub struct Timer<'a> { + output: bool, + name: &'a str, + start: Instant, +} + +impl<'a> Timer<'a> { + /// Creates a Timer with the given name, and starts it. By default, + /// will print to stderr when it is `drop`'d + pub fn new(name: &'a str) -> Self { + Timer { + output: true, + name, + start: Instant::now(), + } + } + + /// Sets whether or not the Timer will print a message + /// when it is dropped. 
+ pub fn with_output(mut self, output: bool) -> Self { + self.output = output; + self + } + + /// Returns the time elapsed since the timer's creation + pub fn elapsed(&self) -> Duration { + Instant::now() - self.start + } + + fn print_elapsed(&mut self) { + if self.output { + let elapsed = self.elapsed(); + let time = (elapsed.as_secs() as f64) * 1e3 + + (elapsed.subsec_nanos() as f64) / 1e6; + let stderr = io::stderr(); + // Arbitrary output format, subject to change. + writeln!(stderr.lock(), " time: {:>9.3} ms.\t{}", time, self.name) + .expect("timer write should not fail"); + } + } +} + +impl<'a> Drop for Timer<'a> { + fn drop(&mut self) { + self.print_elapsed(); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/.cargo-checksum.json 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{"CHANGELOG.md":"34c3abc5bfaeb865e9b901c245f7e41f02a96b6be50beef5302458bff548ef3c","Cargo.toml":"a91f488aed0df3b4a1ddc1558f2b2fd05a543f9a37ce28e47548f8d9269ed979","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"c126e3dffb9c9e40c9355b5b0261ca84aa72425c5819bbe13abc40a161c51c78","src/filter/mod.rs":"92e7fd89ef1d5d46851de394a10349230045cb1e97f552f09f42031f372ab3dd","src/filter/regex.rs":"bdf875bac25e089e1e462f5dd01a88678067c24118ecd6268561c6a6af39747d","src/filter/string.rs":"fac54d51189fc0b5d2bff334b7a7e465177b431e3428299e345e1f90062d832e","src/fmt/humantime/extern_impl.rs":"97e0a128275c086b02760a33c22db8524f005994ebb1318a6ac582fd51cf444a","src/fmt/humantime/mod.rs":"f4111c26cf2ffb85c1d639bd7674d55af7e1736e7e98c52f7be3070046a3253f","src/fmt/humantime/shim_impl.rs":"cce9a252abd5952fa109a72b1dfb85a593d237e22606b2b608a32c69184560e9","src/fmt/mod.rs":"eb28817c0db5ed729f516927b981d0833cc3fe8bd2cf48256e03a04400d763df","src/fmt/writer/atty.rs":"09d14097dee61492828daaabdbde9f43251e2cb32e79d66c1c0e63f317761704","src/fmt/writer/mod.rs":"f3fb4adc740a1d9057bd398d751a9af24dead7ac108a8e2c571567778ce0f23e","src/fmt/writer/termcolor/extern_impl.rs":"4f02bdca6ad627856af1ac2eef0813104bd383685a2ff87983da9f7dee8261e3","src/fmt/writer/termcolor/mod.rs":"a790f9391a50cd52be6823e3e55942de13a8d12e23d63765342ae9e8dd6d091c","src/fmt/writer/termcolor/shim_impl.rs":"8e6e7d40782b14e33c6b75b81899a612549c2c7937ce28b48cdc60e1e3f8b855","src/lib.rs":"921561bf8d4efad4ab8938b36bc14e277968bfc364d131203822e64eb6e8265e","tests/init-twice-retains-filter.rs":"be5cd2132342d89ede1f5c4266173bb3c4d51cc22a1847f133d299a1c5430ccb","tests/log-in-log.rs":"29fecc65c1e0d1c22d79c97e7ca843ad44a91f27934148d7a05c48899a3f39d8","tests/log_tls_dtors.rs":"7320667d774a9b05037f7bf273fb2574dec0705707692a9cd2f46f4cd5bc68dd","tests/regexp_filter.rs":"a84263c995b534b6479a1d0abadf63f4f0264958ff86d9
173d6b2139b82c4dc5"},"package":"a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/Cargo.toml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,85 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "env_logger" -version = "0.9.3" -include = [ - "src/**/*", - "tests", - "LICENSE-*", - "README.md", - "CHANGELOG.md", -] -description = """ -A logging implementation for `log` which is configured via an environment -variable. 
-""" -documentation = "https://docs.rs/env_logger" -readme = "README.md" -keywords = [ - "logging", - "log", - "logger", -] -categories = ["development-tools::debugging"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/env-logger-rs/env_logger/" - -[[test]] -name = "regexp_filter" -harness = false - -[[test]] -name = "log-in-log" -harness = false - -[[test]] -name = "log_tls_dtors" -harness = false - -[[test]] -name = "init-twice-retains-filter" -harness = false - -[dependencies.atty] -version = "0.2.5" -optional = true - -[dependencies.humantime] -version = "2.0.0" -optional = true - -[dependencies.log] -version = "0.4.8" -features = ["std"] - -[dependencies.regex] -version = "1.0.3" -features = [ - "std", - "perf", -] -optional = true -default-features = false - -[dependencies.termcolor] -version = "1.1.1" -optional = true - -[features] -default = [ - "termcolor", - "atty", - "humantime", - "regex", -] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/CHANGELOG.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/CHANGELOG.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/CHANGELOG.md 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -Changes to this crate are tracked via [GitHub Releases][releases]. 
- -[releases]: https://github.com/env-logger-rs/env_logger/releases diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-APACHE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-APACHE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-APACHE 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-MIT clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-MIT --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-MIT 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/README.md 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,183 +0,0 @@ -# env_logger - -[![crates.io](https://img.shields.io/crates/v/env_logger.svg)](https://crates.io/crates/env_logger) -[![Documentation](https://docs.rs/env_logger/badge.svg)](https://docs.rs/env_logger) -[![Documentation](https://img.shields.io/badge/docs-main-blue.svg)](https://env-logger-rs.github.io/env_logger/env_logger/index.html) - -Implements a logger that can be configured via environment variables. - -## Usage - -### In libraries - -`env_logger` makes sense when used in executables (binary projects). Libraries should use the [`log`](https://docs.rs/log) crate instead. - -### In executables - -It must be added along with `log` to the project dependencies: - -```toml -[dependencies] -log = "0.4.0" -env_logger = "0.9.0" -``` - -`env_logger` must be initialized as early as possible in the project. After it's initialized, you can use the `log` macros to do actual logging. - -```rust -#[macro_use] -extern crate log; - -fn main() { - env_logger::init(); - - info!("starting up"); - - // ... -} -``` - -Then when running the executable, specify a value for the **`RUST_LOG`** -environment variable that corresponds with the log messages you want to show. 
- -```bash -$ RUST_LOG=info ./main -[2018-11-03T06:09:06Z INFO default] starting up -``` - -The letter case is not significant for the logging level names; e.g., `debug`, -`DEBUG`, and `dEbuG` all represent the same logging level. Therefore, the -previous example could also have been written this way, specifying the log -level as `INFO` rather than as `info`: - -```bash -$ RUST_LOG=INFO ./main -[2018-11-03T06:09:06Z INFO default] starting up -``` - -So which form should you use? For consistency, our convention is to use lower -case names. Where our docs do use other forms, they do so in the context of -specific examples, so you won't be surprised if you see similar usage in the -wild. - -The log levels that may be specified correspond to the [`log::Level`][level-enum] -enum from the `log` crate. They are: - - * `error` - * `warn` - * `info` - * `debug` - * `trace` - -[level-enum]: https://docs.rs/log/latest/log/enum.Level.html "log::Level (docs.rs)" - -There is also a pseudo logging level, `off`, which may be specified to disable -all logging for a given module or for the entire application. As with the -logging levels, the letter case is not significant. - -`env_logger` can be configured in other ways besides an environment variable. See [the examples](https://github.com/env-logger-rs/env_logger/tree/main/examples) for more approaches. 
- -### In tests - -Tests can use the `env_logger` crate to see log messages generated during that test: - -```toml -[dependencies] -log = "0.4.0" - -[dev-dependencies] -env_logger = "0.9.0" -``` - -```rust -#[macro_use] -extern crate log; - -fn add_one(num: i32) -> i32 { - info!("add_one called with {}", num); - num + 1 -} - -#[cfg(test)] -mod tests { - use super::*; - - fn init() { - let _ = env_logger::builder().is_test(true).try_init(); - } - - #[test] - fn it_adds_one() { - init(); - - info!("can log from the test too"); - assert_eq!(3, add_one(2)); - } - - #[test] - fn it_handles_negative_numbers() { - init(); - - info!("logging from another test"); - assert_eq!(-7, add_one(-8)); - } -} -``` - -Assuming the module under test is called `my_lib`, running the tests with the -`RUST_LOG` filtering to info messages from this module looks like: - -```bash -$ RUST_LOG=my_lib=info cargo test - Running target/debug/my_lib-... - -running 2 tests -[INFO my_lib::tests] logging from another test -[INFO my_lib] add_one called with -8 -test tests::it_handles_negative_numbers ... ok -[INFO my_lib::tests] can log from the test too -[INFO my_lib] add_one called with 2 -test tests::it_adds_one ... ok - -test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured -``` - -Note that `env_logger::try_init()` needs to be called in each test in which you -want to enable logging. Additionally, the default behavior of tests to -run in parallel means that logging output may be interleaved with test output. -Either run tests in a single thread by specifying `RUST_TEST_THREADS=1` or by -running one test by specifying its name as an argument to the test binaries as -directed by the `cargo test` help docs: - -```bash -$ RUST_LOG=my_lib=info cargo test it_adds_one - Running target/debug/my_lib-... - -running 1 test -[INFO my_lib::tests] can log from the test too -[INFO my_lib] add_one called with 2 -test tests::it_adds_one ... ok - -test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured -``` - -## Configuring log target - -By default, `env_logger` logs to stderr. If you want to log to stdout instead, -you can use the `Builder` to change the log target: - -```rust -use std::env; -use env_logger::{Builder, Target}; - -let mut builder = Builder::from_default_env(); -builder.target(Target::Stdout); - -builder.init(); -``` - -## Stability of the default format - -The default format won't optimise for long-term stability, and explicitly makes no guarantees about the stability of its output across major, minor or patch version bumps during `0.x`. - -If you want to capture or interpret the output of `env_logger` programmatically then you should use a custom format. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/mod.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,868 +0,0 @@ -//! Filtering for log records. -//! -//! This module contains the log filtering used by `env_logger` to match records. -//! You can use the `Filter` type in your own logger implementation to use the same -//! filter parsing and matching as `env_logger`. For more details about the format -//! for directive strings see [Enabling Logging]. -//! -//! ## Using `env_logger` in your own logger -//! -//! You can use `env_logger`'s filtering functionality with your own logger. -//! Call [`Builder::parse`] to parse directives from a string when constructing -//! your logger. Call [`Filter::matches`] to check whether a record should be -//! logged based on the parsed filters when log records are received. -//! -//! ``` -//! extern crate log; -//! extern crate env_logger; -//! use env_logger::filter::Filter; -//! use log::{Log, Metadata, Record}; -//! -//! 
struct MyLogger { -//! filter: Filter -//! } -//! -//! impl MyLogger { -//! fn new() -> MyLogger { -//! use env_logger::filter::Builder; -//! let mut builder = Builder::new(); -//! -//! // Parse a directives string from an environment variable -//! if let Ok(ref filter) = std::env::var("MY_LOG_LEVEL") { -//! builder.parse(filter); -//! } -//! -//! MyLogger { -//! filter: builder.build() -//! } -//! } -//! } -//! -//! impl Log for MyLogger { -//! fn enabled(&self, metadata: &Metadata) -> bool { -//! self.filter.enabled(metadata) -//! } -//! -//! fn log(&self, record: &Record) { -//! // Check if the record is matched by the filter -//! if self.filter.matches(record) { -//! println!("{:?}", record); -//! } -//! } -//! -//! fn flush(&self) {} -//! } -//! ``` -//! -//! [Enabling Logging]: ../index.html#enabling-logging -//! [`Builder::parse`]: struct.Builder.html#method.parse -//! [`Filter::matches`]: struct.Filter.html#method.matches - -use log::{Level, LevelFilter, Metadata, Record}; -use std::collections::HashMap; -use std::env; -use std::fmt; -use std::mem; - -#[cfg(feature = "regex")] -#[path = "regex.rs"] -mod inner; - -#[cfg(not(feature = "regex"))] -#[path = "string.rs"] -mod inner; - -/// A log filter. -/// -/// This struct can be used to determine whether or not a log record -/// should be written to the output. -/// Use the [`Builder`] type to parse and construct a `Filter`. -/// -/// [`Builder`]: struct.Builder.html -pub struct Filter { - directives: Vec, - filter: Option, -} - -/// A builder for a log filter. -/// -/// It can be used to parse a set of directives from a string before building -/// a [`Filter`] instance. -/// -/// ## Example -/// -/// ``` -/// # #[macro_use] extern crate log; -/// # use std::env; -/// use env_logger::filter::Builder; -/// -/// let mut builder = Builder::new(); -/// -/// // Parse a logging filter from an environment variable. 
-/// if let Ok(rust_log) = env::var("RUST_LOG") { -/// builder.parse(&rust_log); -/// } -/// -/// let filter = builder.build(); -/// ``` -/// -/// [`Filter`]: struct.Filter.html -pub struct Builder { - directives: HashMap, LevelFilter>, - filter: Option, - built: bool, -} - -#[derive(Debug)] -struct Directive { - name: Option, - level: LevelFilter, -} - -impl Filter { - /// Returns the maximum `LevelFilter` that this filter instance is - /// configured to output. - /// - /// # Example - /// - /// ```rust - /// use log::LevelFilter; - /// use env_logger::filter::Builder; - /// - /// let mut builder = Builder::new(); - /// builder.filter(Some("module1"), LevelFilter::Info); - /// builder.filter(Some("module2"), LevelFilter::Error); - /// - /// let filter = builder.build(); - /// assert_eq!(filter.filter(), LevelFilter::Info); - /// ``` - pub fn filter(&self) -> LevelFilter { - self.directives - .iter() - .map(|d| d.level) - .max() - .unwrap_or(LevelFilter::Off) - } - - /// Checks if this record matches the configured filter. - pub fn matches(&self, record: &Record) -> bool { - if !self.enabled(record.metadata()) { - return false; - } - - if let Some(filter) = self.filter.as_ref() { - if !filter.is_match(&record.args().to_string()) { - return false; - } - } - - true - } - - /// Determines if a log message with the specified metadata would be logged. - pub fn enabled(&self, metadata: &Metadata) -> bool { - let level = metadata.level(); - let target = metadata.target(); - - enabled(&self.directives, level, target) - } -} - -impl Builder { - /// Initializes the filter builder with defaults. - pub fn new() -> Builder { - Builder { - directives: HashMap::new(), - filter: None, - built: false, - } - } - - /// Initializes the filter builder from an environment. - pub fn from_env(env: &str) -> Builder { - let mut builder = Builder::new(); - - if let Ok(s) = env::var(env) { - builder.parse(&s); - } - - builder - } - - /// Adds a directive to the filter for a specific module. 
- pub fn filter_module(&mut self, module: &str, level: LevelFilter) -> &mut Self { - self.filter(Some(module), level) - } - - /// Adds a directive to the filter for all modules. - pub fn filter_level(&mut self, level: LevelFilter) -> &mut Self { - self.filter(None, level) - } - - /// Adds a directive to the filter. - /// - /// The given module (if any) will log at most the specified level provided. - /// If no module is provided then the filter will apply to all log messages. - pub fn filter(&mut self, module: Option<&str>, level: LevelFilter) -> &mut Self { - self.directives.insert(module.map(|s| s.to_string()), level); - self - } - - /// Parses the directives string. - /// - /// See the [Enabling Logging] section for more details. - /// - /// [Enabling Logging]: ../index.html#enabling-logging - pub fn parse(&mut self, filters: &str) -> &mut Self { - let (directives, filter) = parse_spec(filters); - - self.filter = filter; - - for directive in directives { - self.directives.insert(directive.name, directive.level); - } - self - } - - /// Build a log filter. - pub fn build(&mut self) -> Filter { - assert!(!self.built, "attempt to re-use consumed builder"); - self.built = true; - - let mut directives = Vec::new(); - if self.directives.is_empty() { - // Adds the default filter if none exist - directives.push(Directive { - name: None, - level: LevelFilter::Error, - }); - } else { - // Consume map of directives. - let directives_map = mem::take(&mut self.directives); - directives = directives_map - .into_iter() - .map(|(name, level)| Directive { name, level }) - .collect(); - // Sort the directives by length of their name, this allows a - // little more efficient lookup at runtime. 
- directives.sort_by(|a, b| { - let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); - let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); - alen.cmp(&blen) - }); - } - - Filter { - directives: mem::take(&mut directives), - filter: mem::replace(&mut self.filter, None), - } - } -} - -impl Default for Builder { - fn default() -> Self { - Builder::new() - } -} - -impl fmt::Debug for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Filter") - .field("filter", &self.filter) - .field("directives", &self.directives) - .finish() - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.built { - f.debug_struct("Filter").field("built", &true).finish() - } else { - f.debug_struct("Filter") - .field("filter", &self.filter) - .field("directives", &self.directives) - .finish() - } - } -} - -/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") -/// and return a vector with log directives. 
-fn parse_spec(spec: &str) -> (Vec, Option) { - let mut dirs = Vec::new(); - - let mut parts = spec.split('/'); - let mods = parts.next(); - let filter = parts.next(); - if parts.next().is_some() { - eprintln!( - "warning: invalid logging spec '{}', \ - ignoring it (too many '/'s)", - spec - ); - return (dirs, None); - } - if let Some(m) = mods { - for s in m.split(',').map(|ss| ss.trim()) { - if s.is_empty() { - continue; - } - let mut parts = s.split('='); - let (log_level, name) = - match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { - (Some(part0), None, None) => { - // if the single argument is a log-level string or number, - // treat that as a global fallback - match part0.parse() { - Ok(num) => (num, None), - Err(_) => (LevelFilter::max(), Some(part0)), - } - } - (Some(part0), Some(""), None) => (LevelFilter::max(), Some(part0)), - (Some(part0), Some(part1), None) => match part1.parse() { - Ok(num) => (num, Some(part0)), - _ => { - eprintln!( - "warning: invalid logging spec '{}', \ - ignoring it", - part1 - ); - continue; - } - }, - _ => { - eprintln!( - "warning: invalid logging spec '{}', \ - ignoring it", - s - ); - continue; - } - }; - dirs.push(Directive { - name: name.map(|s| s.to_string()), - level: log_level, - }); - } - } - - let filter = filter.and_then(|filter| match inner::Filter::new(filter) { - Ok(re) => Some(re), - Err(e) => { - eprintln!("warning: invalid regex filter - {}", e); - None - } - }); - - (dirs, filter) -} - -// Check whether a level and target are enabled by the set of directives. -fn enabled(directives: &[Directive], level: Level, target: &str) -> bool { - // Search for the longest match, the vector is assumed to be pre-sorted. - for directive in directives.iter().rev() { - match directive.name { - Some(ref name) if !target.starts_with(&**name) => {} - Some(..) 
| None => return level <= directive.level, - } - } - false -} - -#[cfg(test)] -mod tests { - use log::{Level, LevelFilter}; - - use super::{enabled, parse_spec, Builder, Directive, Filter}; - - fn make_logger_filter(dirs: Vec) -> Filter { - let mut logger = Builder::new().build(); - logger.directives = dirs; - logger - } - - #[test] - fn filter_info() { - let logger = Builder::new().filter(None, LevelFilter::Info).build(); - assert!(enabled(&logger.directives, Level::Info, "crate1")); - assert!(!enabled(&logger.directives, Level::Debug, "crate1")); - } - - #[test] - fn filter_beginning_longest_match() { - let logger = Builder::new() - .filter(Some("crate2"), LevelFilter::Info) - .filter(Some("crate2::mod"), LevelFilter::Debug) - .filter(Some("crate1::mod1"), LevelFilter::Warn) - .build(); - assert!(enabled(&logger.directives, Level::Debug, "crate2::mod1")); - assert!(!enabled(&logger.directives, Level::Debug, "crate2")); - } - - // Some of our tests are only correct or complete when they cover the full - // universe of variants for log::Level. In the unlikely event that a new - // variant is added in the future, this test will detect the scenario and - // alert us to the need to review and update the tests. 
In such a - // situation, this test will fail to compile, and the error message will - // look something like this: - // - // error[E0004]: non-exhaustive patterns: `NewVariant` not covered - // --> src/filter/mod.rs:413:15 - // | - // 413 | match level_universe { - // | ^^^^^^^^^^^^^^ pattern `NewVariant` not covered - #[test] - fn ensure_tests_cover_level_universe() { - let level_universe: Level = Level::Trace; // use of trace variant is arbitrary - match level_universe { - Level::Error | Level::Warn | Level::Info | Level::Debug | Level::Trace => (), - } - } - - #[test] - fn parse_default() { - let logger = Builder::new().parse("info,crate1::mod1=warn").build(); - assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1")); - assert!(enabled(&logger.directives, Level::Info, "crate2::mod2")); - } - - #[test] - fn parse_default_bare_level_off_lc() { - let logger = Builder::new().parse("off").build(); - assert!(!enabled(&logger.directives, Level::Error, "")); - assert!(!enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_off_uc() { - let logger = Builder::new().parse("OFF").build(); - assert!(!enabled(&logger.directives, Level::Error, "")); - assert!(!enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_error_lc() { - let logger = Builder::new().parse("error").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(!enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, 
Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_error_uc() { - let logger = Builder::new().parse("ERROR").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(!enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_warn_lc() { - let logger = Builder::new().parse("warn").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_warn_uc() { - let logger = Builder::new().parse("WARN").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(!enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_info_lc() { - let logger = Builder::new().parse("info").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_info_uc() { - let logger = Builder::new().parse("INFO").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(!enabled(&logger.directives, Level::Debug, "")); - 
assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_debug_lc() { - let logger = Builder::new().parse("debug").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_debug_uc() { - let logger = Builder::new().parse("DEBUG").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_trace_lc() { - let logger = Builder::new().parse("trace").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(enabled(&logger.directives, Level::Trace, "")); - } - - #[test] - fn parse_default_bare_level_trace_uc() { - let logger = Builder::new().parse("TRACE").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(enabled(&logger.directives, Level::Trace, "")); - } - - // In practice, the desired log level is typically specified by a token - // that is either all lowercase (e.g., 'trace') or all uppercase (.e.g, - // 'TRACE'), but this tests serves as a reminder that - // log::Level::from_str() ignores all case variants. 
- #[test] - fn parse_default_bare_level_debug_mixed() { - { - let logger = Builder::new().parse("Debug").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - { - let logger = Builder::new().parse("debuG").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - { - let logger = Builder::new().parse("deBug").build(); - assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - { - let logger = Builder::new().parse("DeBuG").build(); // LaTeX flavor! 
- assert!(enabled(&logger.directives, Level::Error, "")); - assert!(enabled(&logger.directives, Level::Warn, "")); - assert!(enabled(&logger.directives, Level::Info, "")); - assert!(enabled(&logger.directives, Level::Debug, "")); - assert!(!enabled(&logger.directives, Level::Trace, "")); - } - } - - #[test] - fn match_full_path() { - let logger = make_logger_filter(vec![ - Directive { - name: Some("crate2".to_string()), - level: LevelFilter::Info, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Warn, - }, - ]); - assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1")); - assert!(!enabled(&logger.directives, Level::Info, "crate1::mod1")); - assert!(enabled(&logger.directives, Level::Info, "crate2")); - assert!(!enabled(&logger.directives, Level::Debug, "crate2")); - } - - #[test] - fn no_match() { - let logger = make_logger_filter(vec![ - Directive { - name: Some("crate2".to_string()), - level: LevelFilter::Info, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Warn, - }, - ]); - assert!(!enabled(&logger.directives, Level::Warn, "crate3")); - } - - #[test] - fn match_beginning() { - let logger = make_logger_filter(vec![ - Directive { - name: Some("crate2".to_string()), - level: LevelFilter::Info, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Warn, - }, - ]); - assert!(enabled(&logger.directives, Level::Info, "crate2::mod1")); - } - - #[test] - fn match_beginning_longest_match() { - let logger = make_logger_filter(vec![ - Directive { - name: Some("crate2".to_string()), - level: LevelFilter::Info, - }, - Directive { - name: Some("crate2::mod".to_string()), - level: LevelFilter::Debug, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Warn, - }, - ]); - assert!(enabled(&logger.directives, Level::Debug, "crate2::mod1")); - assert!(!enabled(&logger.directives, Level::Debug, "crate2")); - } - - #[test] - fn match_default() { 
- let logger = make_logger_filter(vec![ - Directive { - name: None, - level: LevelFilter::Info, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Warn, - }, - ]); - assert!(enabled(&logger.directives, Level::Warn, "crate1::mod1")); - assert!(enabled(&logger.directives, Level::Info, "crate2::mod2")); - } - - #[test] - fn zero_level() { - let logger = make_logger_filter(vec![ - Directive { - name: None, - level: LevelFilter::Info, - }, - Directive { - name: Some("crate1::mod1".to_string()), - level: LevelFilter::Off, - }, - ]); - assert!(!enabled(&logger.directives, Level::Error, "crate1::mod1")); - assert!(enabled(&logger.directives, Level::Info, "crate2::mod2")); - } - - #[test] - fn parse_spec_valid() { - let (dirs, filter) = parse_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); - assert_eq!(dirs.len(), 3); - assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Error); - - assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); - assert_eq!(dirs[1].level, LevelFilter::max()); - - assert_eq!(dirs[2].name, Some("crate2".to_string())); - assert_eq!(dirs[2].level, LevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_invalid_crate() { - // test parse_spec with multiple = in specification - let (dirs, filter) = parse_spec("crate1::mod1=warn=info,crate2=debug"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_invalid_level() { - // test parse_spec with 'noNumber' as log level - let (dirs, filter) = parse_spec("crate1::mod1=noNumber,crate2=debug"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_string_level() { - // test parse_spec with 'warn' as log level - let 
(dirs, filter) = parse_spec("crate1::mod1=wrong,crate2=warn"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Warn); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_empty_level() { - // test parse_spec with '' as log level - let (dirs, filter) = parse_spec("crate1::mod1=wrong,crate2="); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LevelFilter::max()); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_empty_level_isolated() { - // test parse_spec with "" as log level (and the entire spec str) - let (dirs, filter) = parse_spec(""); // should be ignored - assert_eq!(dirs.len(), 0); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_blank_level_isolated() { - // test parse_spec with a white-space-only string specified as the log - // level (and the entire spec str) - let (dirs, filter) = parse_spec(" "); // should be ignored - assert_eq!(dirs.len(), 0); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_blank_level_isolated_comma_only() { - // The spec should contain zero or more comma-separated string slices, - // so a comma-only string should be interpretted as two empty strings - // (which should both be treated as invalid, so ignored). - let (dirs, filter) = parse_spec(","); // should be ignored - assert_eq!(dirs.len(), 0); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_blank_level_isolated_comma_blank() { - // The spec should contain zero or more comma-separated string slices, - // so this bogus spec should be interpretted as containing one empty - // string and one blank string. Both should both be treated as - // invalid, so ignored. 
- let (dirs, filter) = parse_spec(", "); // should be ignored - assert_eq!(dirs.len(), 0); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_blank_level_isolated_blank_comma() { - // The spec should contain zero or more comma-separated string slices, - // so this bogus spec should be interpretted as containing one blank - // string and one empty string. Both should both be treated as - // invalid, so ignored. - let (dirs, filter) = parse_spec(" ,"); // should be ignored - assert_eq!(dirs.len(), 0); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_global() { - // test parse_spec with no crate - let (dirs, filter) = parse_spec("warn,crate2=debug"); - assert_eq!(dirs.len(), 2); - assert_eq!(dirs[0].name, None); - assert_eq!(dirs[0].level, LevelFilter::Warn); - assert_eq!(dirs[1].name, Some("crate2".to_string())); - assert_eq!(dirs[1].level, LevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_global_bare_warn_lc() { - // test parse_spec with no crate, in isolation, all lowercase - let (dirs, filter) = parse_spec("warn"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, None); - assert_eq!(dirs[0].level, LevelFilter::Warn); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_global_bare_warn_uc() { - // test parse_spec with no crate, in isolation, all uppercase - let (dirs, filter) = parse_spec("WARN"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, None); - assert_eq!(dirs[0].level, LevelFilter::Warn); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_global_bare_warn_mixed() { - // test parse_spec with no crate, in isolation, mixed case - let (dirs, filter) = parse_spec("wArN"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, None); - assert_eq!(dirs[0].level, LevelFilter::Warn); - assert!(filter.is_none()); - } - - #[test] - fn parse_spec_valid_filter() { - let (dirs, filter) = parse_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); - assert_eq!(dirs.len(), 3); - 
assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Error); - - assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); - assert_eq!(dirs[1].level, LevelFilter::max()); - - assert_eq!(dirs[2].name, Some("crate2".to_string())); - assert_eq!(dirs[2].level, LevelFilter::Debug); - assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); - } - - #[test] - fn parse_spec_invalid_crate_filter() { - let (dirs, filter) = parse_spec("crate1::mod1=error=warn,crate2=debug/a.c"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LevelFilter::Debug); - assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); - } - - #[test] - fn parse_spec_empty_with_filter() { - let (dirs, filter) = parse_spec("crate1/a*c"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate1".to_string())); - assert_eq!(dirs[0].level, LevelFilter::max()); - assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/regex.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/regex.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/regex.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/regex.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -extern crate regex; - -use std::fmt; - -use self::regex::Regex; - -#[derive(Debug)] -pub struct Filter { - inner: Regex, -} - -impl Filter { - pub fn new(spec: &str) -> Result { - match Regex::new(spec) { - Ok(r) => Ok(Filter { inner: r }), - Err(e) => Err(e.to_string()), - } - } - - pub fn is_match(&self, s: &str) -> bool { - self.inner.is_match(s) - } -} - -impl fmt::Display for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} diff -Nru 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/string.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/string.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/string.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/filter/string.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -use std::fmt; - -#[derive(Debug)] -pub struct Filter { - inner: String, -} - -impl Filter { - pub fn new(spec: &str) -> Result { - Ok(Filter { - inner: spec.to_string(), - }) - } - - pub fn is_match(&self, s: &str) -> bool { - s.contains(&self.inner) - } -} - -impl fmt::Display for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/extern_impl.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/extern_impl.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/extern_impl.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/extern_impl.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -use std::fmt; -use std::time::SystemTime; - -use humantime::{ - format_rfc3339_micros, format_rfc3339_millis, format_rfc3339_nanos, format_rfc3339_seconds, -}; - -use crate::fmt::{Formatter, TimestampPrecision}; - -pub(in crate::fmt) mod glob { - pub use super::*; -} - -impl Formatter { - /// Get a [`Timestamp`] for the current date and time in UTC. 
- /// - /// # Examples - /// - /// Include the current timestamp with the log record: - /// - /// ``` - /// use std::io::Write; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let ts = buf.timestamp(); - /// - /// writeln!(buf, "{}: {}: {}", ts, record.level(), record.args()) - /// }); - /// ``` - /// - /// [`Timestamp`]: struct.Timestamp.html - pub fn timestamp(&self) -> Timestamp { - Timestamp { - time: SystemTime::now(), - precision: TimestampPrecision::Seconds, - } - } - - /// Get a [`Timestamp`] for the current date and time in UTC with full - /// second precision. - pub fn timestamp_seconds(&self) -> Timestamp { - Timestamp { - time: SystemTime::now(), - precision: TimestampPrecision::Seconds, - } - } - - /// Get a [`Timestamp`] for the current date and time in UTC with - /// millisecond precision. - pub fn timestamp_millis(&self) -> Timestamp { - Timestamp { - time: SystemTime::now(), - precision: TimestampPrecision::Millis, - } - } - - /// Get a [`Timestamp`] for the current date and time in UTC with - /// microsecond precision. - pub fn timestamp_micros(&self) -> Timestamp { - Timestamp { - time: SystemTime::now(), - precision: TimestampPrecision::Micros, - } - } - - /// Get a [`Timestamp`] for the current date and time in UTC with - /// nanosecond precision. - pub fn timestamp_nanos(&self) -> Timestamp { - Timestamp { - time: SystemTime::now(), - precision: TimestampPrecision::Nanos, - } - } -} - -/// An [RFC3339] formatted timestamp. -/// -/// The timestamp implements [`Display`] and can be written to a [`Formatter`]. 
-/// -/// [RFC3339]: https://www.ietf.org/rfc/rfc3339.txt -/// [`Display`]: https://doc.rust-lang.org/stable/std/fmt/trait.Display.html -/// [`Formatter`]: struct.Formatter.html -pub struct Timestamp { - time: SystemTime, - precision: TimestampPrecision, -} - -impl fmt::Debug for Timestamp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - /// A `Debug` wrapper for `Timestamp` that uses the `Display` implementation. - struct TimestampValue<'a>(&'a Timestamp); - - impl<'a> fmt::Debug for TimestampValue<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } - } - - f.debug_tuple("Timestamp") - .field(&TimestampValue(self)) - .finish() - } -} - -impl fmt::Display for Timestamp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let formatter = match self.precision { - TimestampPrecision::Seconds => format_rfc3339_seconds, - TimestampPrecision::Millis => format_rfc3339_millis, - TimestampPrecision::Micros => format_rfc3339_micros, - TimestampPrecision::Nanos => format_rfc3339_nanos, - }; - - formatter(self.time).fmt(f) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/mod.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -/* -This internal module contains the timestamp implementation. - -Its public API is available when the `humantime` crate is available. 
-*/ - -#[cfg_attr(feature = "humantime", path = "extern_impl.rs")] -#[cfg_attr(not(feature = "humantime"), path = "shim_impl.rs")] -mod imp; - -pub(in crate::fmt) use self::imp::*; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/shim_impl.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/shim_impl.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/shim_impl.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/humantime/shim_impl.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -/* -Timestamps aren't available when we don't have a `humantime` dependency. -*/ - -pub(in crate::fmt) mod glob {} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/mod.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,652 +0,0 @@ -//! Formatting for log records. -//! -//! This module contains a [`Formatter`] that can be used to format log records -//! into without needing temporary allocations. Usually you won't need to worry -//! about the contents of this module and can use the `Formatter` like an ordinary -//! [`Write`]. -//! -//! # Formatting log records -//! -//! The format used to print log records can be customised using the [`Builder::format`] -//! method. -//! Custom formats can apply different color and weight to printed values using -//! [`Style`] builders. -//! -//! ``` -//! use std::io::Write; -//! -//! let mut builder = env_logger::Builder::new(); -//! -//! builder.format(|buf, record| { -//! writeln!(buf, "{}: {}", -//! record.level(), -//! record.args()) -//! }); -//! ``` -//! -//! [`Formatter`]: struct.Formatter.html -//! 
[`Style`]: struct.Style.html -//! [`Builder::format`]: ../struct.Builder.html#method.format -//! [`Write`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html - -use std::cell::RefCell; -use std::fmt::Display; -use std::io::prelude::*; -use std::rc::Rc; -use std::{fmt, io, mem}; - -use log::Record; - -mod humantime; -pub(crate) mod writer; - -pub use self::humantime::glob::*; -pub use self::writer::glob::*; - -use self::writer::{Buffer, Writer}; - -pub(crate) mod glob { - pub use super::{Target, TimestampPrecision, WriteStyle}; -} - -/// Formatting precision of timestamps. -/// -/// Seconds give precision of full seconds, milliseconds give thousands of a -/// second (3 decimal digits), microseconds are millionth of a second (6 decimal -/// digits) and nanoseconds are billionth of a second (9 decimal digits). -#[derive(Copy, Clone, Debug)] -pub enum TimestampPrecision { - /// Full second precision (0 decimal digits) - Seconds, - /// Millisecond precision (3 decimal digits) - Millis, - /// Microsecond precision (6 decimal digits) - Micros, - /// Nanosecond precision (9 decimal digits) - Nanos, -} - -/// The default timestamp precision is seconds. -impl Default for TimestampPrecision { - fn default() -> Self { - TimestampPrecision::Seconds - } -} - -/// A formatter to write logs into. -/// -/// `Formatter` implements the standard [`Write`] trait for writing log records. -/// It also supports terminal colors, through the [`style`] method. -/// -/// # Examples -/// -/// Use the [`writeln`] macro to format a log record. 
-/// An instance of a `Formatter` is passed to an `env_logger` format as `buf`: -/// -/// ``` -/// use std::io::Write; -/// -/// let mut builder = env_logger::Builder::new(); -/// -/// builder.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args())); -/// ``` -/// -/// [`Write`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html -/// [`writeln`]: https://doc.rust-lang.org/stable/std/macro.writeln.html -/// [`style`]: #method.style -pub struct Formatter { - buf: Rc>, - write_style: WriteStyle, -} - -impl Formatter { - pub(crate) fn new(writer: &Writer) -> Self { - Formatter { - buf: Rc::new(RefCell::new(writer.buffer())), - write_style: writer.write_style(), - } - } - - pub(crate) fn write_style(&self) -> WriteStyle { - self.write_style - } - - pub(crate) fn print(&self, writer: &Writer) -> io::Result<()> { - writer.print(&self.buf.borrow()) - } - - pub(crate) fn clear(&mut self) { - self.buf.borrow_mut().clear() - } -} - -impl Write for Formatter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.buf.borrow_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.buf.borrow_mut().flush() - } -} - -impl fmt::Debug for Formatter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Formatter").finish() - } -} - -pub(crate) type FormatFn = Box io::Result<()> + Sync + Send>; - -pub(crate) struct Builder { - pub format_timestamp: Option, - pub format_module_path: bool, - pub format_target: bool, - pub format_level: bool, - pub format_indent: Option, - pub custom_format: Option, - pub format_suffix: &'static str, - built: bool, -} - -impl Default for Builder { - fn default() -> Self { - Builder { - format_timestamp: Some(Default::default()), - format_module_path: false, - format_target: true, - format_level: true, - format_indent: Some(4), - custom_format: None, - format_suffix: "\n", - built: false, - } - } -} - -impl Builder { - /// Convert the format into a callable function. 
- /// - /// If the `custom_format` is `Some`, then any `default_format` switches are ignored. - /// If the `custom_format` is `None`, then a default format is returned. - /// Any `default_format` switches set to `false` won't be written by the format. - pub fn build(&mut self) -> FormatFn { - assert!(!self.built, "attempt to re-use consumed builder"); - - let built = mem::replace( - self, - Builder { - built: true, - ..Default::default() - }, - ); - - if let Some(fmt) = built.custom_format { - fmt - } else { - Box::new(move |buf, record| { - let fmt = DefaultFormat { - timestamp: built.format_timestamp, - module_path: built.format_module_path, - target: built.format_target, - level: built.format_level, - written_header_value: false, - indent: built.format_indent, - suffix: built.format_suffix, - buf, - }; - - fmt.write(record) - }) - } - } -} - -#[cfg(feature = "termcolor")] -type SubtleStyle = StyledValue<'static, &'static str>; -#[cfg(not(feature = "termcolor"))] -type SubtleStyle = &'static str; - -/// The default format. -/// -/// This format needs to work with any combination of crate features. 
-struct DefaultFormat<'a> { - timestamp: Option, - module_path: bool, - target: bool, - level: bool, - written_header_value: bool, - indent: Option, - buf: &'a mut Formatter, - suffix: &'a str, -} - -impl<'a> DefaultFormat<'a> { - fn write(mut self, record: &Record) -> io::Result<()> { - self.write_timestamp()?; - self.write_level(record)?; - self.write_module_path(record)?; - self.write_target(record)?; - self.finish_header()?; - - self.write_args(record) - } - - fn subtle_style(&self, text: &'static str) -> SubtleStyle { - #[cfg(feature = "termcolor")] - { - self.buf - .style() - .set_color(Color::Black) - .set_intense(true) - .clone() - .into_value(text) - } - #[cfg(not(feature = "termcolor"))] - { - text - } - } - - fn write_header_value(&mut self, value: T) -> io::Result<()> - where - T: Display, - { - if !self.written_header_value { - self.written_header_value = true; - - let open_brace = self.subtle_style("["); - write!(self.buf, "{}{}", open_brace, value) - } else { - write!(self.buf, " {}", value) - } - } - - fn write_level(&mut self, record: &Record) -> io::Result<()> { - if !self.level { - return Ok(()); - } - - let level = { - #[cfg(feature = "termcolor")] - { - self.buf.default_styled_level(record.level()) - } - #[cfg(not(feature = "termcolor"))] - { - record.level() - } - }; - - self.write_header_value(format_args!("{:<5}", level)) - } - - fn write_timestamp(&mut self) -> io::Result<()> { - #[cfg(feature = "humantime")] - { - use self::TimestampPrecision::*; - let ts = match self.timestamp { - None => return Ok(()), - Some(Seconds) => self.buf.timestamp_seconds(), - Some(Millis) => self.buf.timestamp_millis(), - Some(Micros) => self.buf.timestamp_micros(), - Some(Nanos) => self.buf.timestamp_nanos(), - }; - - self.write_header_value(ts) - } - #[cfg(not(feature = "humantime"))] - { - // Trick the compiler to think we have used self.timestamp - // Workaround for "field is never used: `timestamp`" compiler nag. 
- let _ = self.timestamp; - Ok(()) - } - } - - fn write_module_path(&mut self, record: &Record) -> io::Result<()> { - if !self.module_path { - return Ok(()); - } - - if let Some(module_path) = record.module_path() { - self.write_header_value(module_path) - } else { - Ok(()) - } - } - - fn write_target(&mut self, record: &Record) -> io::Result<()> { - if !self.target { - return Ok(()); - } - - match record.target() { - "" => Ok(()), - target => self.write_header_value(target), - } - } - - fn finish_header(&mut self) -> io::Result<()> { - if self.written_header_value { - let close_brace = self.subtle_style("]"); - write!(self.buf, "{} ", close_brace) - } else { - Ok(()) - } - } - - fn write_args(&mut self, record: &Record) -> io::Result<()> { - match self.indent { - // Fast path for no indentation - None => write!(self.buf, "{}{}", record.args(), self.suffix), - - Some(indent_count) => { - // Create a wrapper around the buffer only if we have to actually indent the message - - struct IndentWrapper<'a, 'b: 'a> { - fmt: &'a mut DefaultFormat<'b>, - indent_count: usize, - } - - impl<'a, 'b> Write for IndentWrapper<'a, 'b> { - fn write(&mut self, buf: &[u8]) -> io::Result { - let mut first = true; - for chunk in buf.split(|&x| x == b'\n') { - if !first { - write!( - self.fmt.buf, - "{}{:width$}", - self.fmt.suffix, - "", - width = self.indent_count - )?; - } - self.fmt.buf.write_all(chunk)?; - first = false; - } - - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - self.fmt.buf.flush() - } - } - - // The explicit scope here is just to make older versions of Rust happy - { - let mut wrapper = IndentWrapper { - fmt: self, - indent_count, - }; - write!(wrapper, "{}", record.args())?; - } - - write!(self.buf, "{}", self.suffix)?; - - Ok(()) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use log::{Level, Record}; - - fn write_record(record: Record, fmt: DefaultFormat) -> String { - let buf = fmt.buf.buf.clone(); - - 
fmt.write(&record).expect("failed to write record"); - - let buf = buf.borrow(); - String::from_utf8(buf.bytes().to_vec()).expect("failed to read record") - } - - fn write_target(target: &str, fmt: DefaultFormat) -> String { - write_record( - Record::builder() - .args(format_args!("log\nmessage")) - .level(Level::Info) - .file(Some("test.rs")) - .line(Some(144)) - .module_path(Some("test::path")) - .target(target) - .build(), - fmt, - ) - } - - fn write(fmt: DefaultFormat) -> String { - write_target("", fmt) - } - - #[test] - fn format_with_header() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: true, - target: false, - level: true, - written_header_value: false, - indent: None, - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("[INFO test::path] log\nmessage\n", written); - } - - #[test] - fn format_no_header() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: false, - target: false, - level: false, - written_header_value: false, - indent: None, - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("log\nmessage\n", written); - } - - #[test] - fn format_indent_spaces() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: true, - target: false, - level: true, - written_header_value: false, - indent: Some(4), - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("[INFO test::path] log\n message\n", written); - } - - #[test] - fn format_indent_zero_spaces() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - 
timestamp: None, - module_path: true, - target: false, - level: true, - written_header_value: false, - indent: Some(0), - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("[INFO test::path] log\nmessage\n", written); - } - - #[test] - fn format_indent_spaces_no_header() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: false, - target: false, - level: false, - written_header_value: false, - indent: Some(4), - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("log\n message\n", written); - } - - #[test] - fn format_suffix() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: false, - target: false, - level: false, - written_header_value: false, - indent: None, - suffix: "\n\n", - buf: &mut f, - }); - - assert_eq!("log\nmessage\n\n", written); - } - - #[test] - fn format_suffix_with_indent() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: false, - target: false, - level: false, - written_header_value: false, - indent: Some(4), - suffix: "\n\n", - buf: &mut f, - }); - - assert_eq!("log\n\n message\n\n", written); - } - - #[test] - fn format_target() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write_target( - "target", - DefaultFormat { - timestamp: None, - module_path: true, - target: true, - level: true, - written_header_value: false, - indent: None, - suffix: "\n", - buf: &mut f, - }, - ); - - assert_eq!("[INFO test::path target] log\nmessage\n", written); - } - - #[test] - fn format_empty_target() { - let writer = 
writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write(DefaultFormat { - timestamp: None, - module_path: true, - target: true, - level: true, - written_header_value: false, - indent: None, - suffix: "\n", - buf: &mut f, - }); - - assert_eq!("[INFO test::path] log\nmessage\n", written); - } - - #[test] - fn format_no_target() { - let writer = writer::Builder::new() - .write_style(WriteStyle::Never) - .build(); - - let mut f = Formatter::new(&writer); - - let written = write_target( - "target", - DefaultFormat { - timestamp: None, - module_path: true, - target: false, - level: true, - written_header_value: false, - indent: None, - suffix: "\n", - buf: &mut f, - }, - ); - - assert_eq!("[INFO test::path] log\nmessage\n", written); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/atty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/atty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/atty.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/atty.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -/* -This internal module contains the terminal detection implementation. - -If the `atty` crate is available then we use it to detect whether we're -attached to a particular TTY. If the `atty` crate is not available we -assume we're not attached to anything. This effectively prevents styles -from being printed. 
-*/ - -#[cfg(feature = "atty")] -mod imp { - pub(in crate::fmt) fn is_stdout() -> bool { - atty::is(atty::Stream::Stdout) - } - - pub(in crate::fmt) fn is_stderr() -> bool { - atty::is(atty::Stream::Stderr) - } -} - -#[cfg(not(feature = "atty"))] -mod imp { - pub(in crate::fmt) fn is_stdout() -> bool { - false - } - - pub(in crate::fmt) fn is_stderr() -> bool { - false - } -} - -pub(in crate::fmt) use self::imp::*; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/mod.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,252 +0,0 @@ -mod atty; -mod termcolor; - -use self::atty::{is_stderr, is_stdout}; -use self::termcolor::BufferWriter; -use std::{fmt, io, mem, sync::Mutex}; - -pub(super) mod glob { - pub use super::termcolor::glob::*; - pub use super::*; -} - -pub(super) use self::termcolor::Buffer; - -/// Log target, either `stdout`, `stderr` or a custom pipe. -#[non_exhaustive] -pub enum Target { - /// Logs will be sent to standard output. - Stdout, - /// Logs will be sent to standard error. - Stderr, - /// Logs will be sent to a custom pipe. - Pipe(Box), -} - -impl Default for Target { - fn default() -> Self { - Target::Stderr - } -} - -impl fmt::Debug for Target { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - match self { - Self::Stdout => "stdout", - Self::Stderr => "stderr", - Self::Pipe(_) => "pipe", - } - ) - } -} - -/// Log target, either `stdout`, `stderr` or a custom pipe. -/// -/// Same as `Target`, except the pipe is wrapped in a mutex for interior mutability. -pub(super) enum WritableTarget { - /// Logs will be sent to standard output. - Stdout, - /// Logs will be sent to standard error. 
- Stderr, - /// Logs will be sent to a custom pipe. - Pipe(Box>), -} - -impl From for WritableTarget { - fn from(target: Target) -> Self { - match target { - Target::Stdout => Self::Stdout, - Target::Stderr => Self::Stderr, - Target::Pipe(pipe) => Self::Pipe(Box::new(Mutex::new(pipe))), - } - } -} - -impl Default for WritableTarget { - fn default() -> Self { - Self::from(Target::default()) - } -} - -impl fmt::Debug for WritableTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - match self { - Self::Stdout => "stdout", - Self::Stderr => "stderr", - Self::Pipe(_) => "pipe", - } - ) - } -} -/// Whether or not to print styles to the target. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub enum WriteStyle { - /// Try to print styles, but don't force the issue. - Auto, - /// Try very hard to print styles. - Always, - /// Never print styles. - Never, -} - -impl Default for WriteStyle { - fn default() -> Self { - WriteStyle::Auto - } -} - -/// A terminal target with color awareness. -pub(crate) struct Writer { - inner: BufferWriter, - write_style: WriteStyle, -} - -impl Writer { - pub fn write_style(&self) -> WriteStyle { - self.write_style - } - - pub(super) fn buffer(&self) -> Buffer { - self.inner.buffer() - } - - pub(super) fn print(&self, buf: &Buffer) -> io::Result<()> { - self.inner.print(buf) - } -} - -/// A builder for a terminal writer. -/// -/// The target and style choice can be configured before building. -#[derive(Debug)] -pub(crate) struct Builder { - target: WritableTarget, - write_style: WriteStyle, - is_test: bool, - built: bool, -} - -impl Builder { - /// Initialize the writer builder with defaults. - pub(crate) fn new() -> Self { - Builder { - target: Default::default(), - write_style: Default::default(), - is_test: false, - built: false, - } - } - - /// Set the target to write to. 
- pub(crate) fn target(&mut self, target: Target) -> &mut Self { - self.target = target.into(); - self - } - - /// Parses a style choice string. - /// - /// See the [Disabling colors] section for more details. - /// - /// [Disabling colors]: ../index.html#disabling-colors - pub(crate) fn parse_write_style(&mut self, write_style: &str) -> &mut Self { - self.write_style(parse_write_style(write_style)) - } - - /// Whether or not to print style characters when writing. - pub(crate) fn write_style(&mut self, write_style: WriteStyle) -> &mut Self { - self.write_style = write_style; - self - } - - /// Whether or not to capture logs for `cargo test`. - pub(crate) fn is_test(&mut self, is_test: bool) -> &mut Self { - self.is_test = is_test; - self - } - - /// Build a terminal writer. - pub(crate) fn build(&mut self) -> Writer { - assert!(!self.built, "attempt to re-use consumed builder"); - self.built = true; - - let color_choice = match self.write_style { - WriteStyle::Auto => { - if match &self.target { - WritableTarget::Stderr => is_stderr(), - WritableTarget::Stdout => is_stdout(), - WritableTarget::Pipe(_) => false, - } { - WriteStyle::Auto - } else { - WriteStyle::Never - } - } - color_choice => color_choice, - }; - - let writer = match mem::take(&mut self.target) { - WritableTarget::Stderr => BufferWriter::stderr(self.is_test, color_choice), - WritableTarget::Stdout => BufferWriter::stdout(self.is_test, color_choice), - WritableTarget::Pipe(pipe) => BufferWriter::pipe(color_choice, pipe), - }; - - Writer { - inner: writer, - write_style: self.write_style, - } - } -} - -impl Default for Builder { - fn default() -> Self { - Builder::new() - } -} - -impl fmt::Debug for Writer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Writer").finish() - } -} - -fn parse_write_style(spec: &str) -> WriteStyle { - match spec { - "auto" => WriteStyle::Auto, - "always" => WriteStyle::Always, - "never" => WriteStyle::Never, - _ => Default::default(), - } -} 
- -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_write_style_valid() { - let inputs = vec![ - ("auto", WriteStyle::Auto), - ("always", WriteStyle::Always), - ("never", WriteStyle::Never), - ]; - - for (input, expected) in inputs { - assert_eq!(expected, parse_write_style(input)); - } - } - - #[test] - fn parse_write_style_invalid() { - let inputs = vec!["", "true", "false", "NEVER!!"]; - - for input in inputs { - assert_eq!(WriteStyle::Auto, parse_write_style(input)); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/extern_impl.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/extern_impl.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/extern_impl.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/extern_impl.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,532 +0,0 @@ -use std::borrow::Cow; -use std::cell::RefCell; -use std::fmt; -use std::io::{self, Write}; -use std::rc::Rc; -use std::sync::Mutex; - -use log::Level; -use termcolor::{self, ColorChoice, ColorSpec, WriteColor}; - -use crate::fmt::{Formatter, WritableTarget, WriteStyle}; - -pub(in crate::fmt::writer) mod glob { - pub use super::*; -} - -impl Formatter { - /// Begin a new [`Style`]. 
- /// - /// # Examples - /// - /// Create a bold, red colored style and use it to print the log level: - /// - /// ``` - /// use std::io::Write; - /// use env_logger::fmt::Color; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut level_style = buf.style(); - /// - /// level_style.set_color(Color::Red).set_bold(true); - /// - /// writeln!(buf, "{}: {}", - /// level_style.value(record.level()), - /// record.args()) - /// }); - /// ``` - /// - /// [`Style`]: struct.Style.html - pub fn style(&self) -> Style { - Style { - buf: self.buf.clone(), - spec: ColorSpec::new(), - } - } - - /// Get the default [`Style`] for the given level. - /// - /// The style can be used to print other values besides the level. - pub fn default_level_style(&self, level: Level) -> Style { - let mut level_style = self.style(); - match level { - Level::Trace => level_style.set_color(Color::Cyan), - Level::Debug => level_style.set_color(Color::Blue), - Level::Info => level_style.set_color(Color::Green), - Level::Warn => level_style.set_color(Color::Yellow), - Level::Error => level_style.set_color(Color::Red).set_bold(true), - }; - level_style - } - - /// Get a printable [`Style`] for the given level. - /// - /// The style can only be used to print the level. 
- pub fn default_styled_level(&self, level: Level) -> StyledValue<'static, Level> { - self.default_level_style(level).into_value(level) - } -} - -pub(in crate::fmt::writer) struct BufferWriter { - inner: termcolor::BufferWriter, - uncolored_target: Option, -} - -pub(in crate::fmt) struct Buffer { - inner: termcolor::Buffer, - has_uncolored_target: bool, -} - -impl BufferWriter { - pub(in crate::fmt::writer) fn stderr(is_test: bool, write_style: WriteStyle) -> Self { - BufferWriter { - inner: termcolor::BufferWriter::stderr(write_style.into_color_choice()), - uncolored_target: if is_test { - Some(WritableTarget::Stderr) - } else { - None - }, - } - } - - pub(in crate::fmt::writer) fn stdout(is_test: bool, write_style: WriteStyle) -> Self { - BufferWriter { - inner: termcolor::BufferWriter::stdout(write_style.into_color_choice()), - uncolored_target: if is_test { - Some(WritableTarget::Stdout) - } else { - None - }, - } - } - - pub(in crate::fmt::writer) fn pipe( - write_style: WriteStyle, - pipe: Box>, - ) -> Self { - BufferWriter { - // The inner Buffer is never printed from, but it is still needed to handle coloring and other formating - inner: termcolor::BufferWriter::stderr(write_style.into_color_choice()), - uncolored_target: Some(WritableTarget::Pipe(pipe)), - } - } - - pub(in crate::fmt::writer) fn buffer(&self) -> Buffer { - Buffer { - inner: self.inner.buffer(), - has_uncolored_target: self.uncolored_target.is_some(), - } - } - - pub(in crate::fmt::writer) fn print(&self, buf: &Buffer) -> io::Result<()> { - if let Some(target) = &self.uncolored_target { - // This impl uses the `eprint` and `print` macros - // instead of `termcolor`'s buffer. 
- // This is so their output can be captured by `cargo test` - let log = String::from_utf8_lossy(buf.bytes()); - - match target { - WritableTarget::Stderr => eprint!("{}", log), - WritableTarget::Stdout => print!("{}", log), - WritableTarget::Pipe(pipe) => write!(pipe.lock().unwrap(), "{}", log)?, - } - - Ok(()) - } else { - self.inner.print(&buf.inner) - } - } -} - -impl Buffer { - pub(in crate::fmt) fn clear(&mut self) { - self.inner.clear() - } - - pub(in crate::fmt) fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - pub(in crate::fmt) fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } - - pub(in crate::fmt) fn bytes(&self) -> &[u8] { - self.inner.as_slice() - } - - fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> { - // Ignore styles for test captured logs because they can't be printed - if !self.has_uncolored_target { - self.inner.set_color(spec) - } else { - Ok(()) - } - } - - fn reset(&mut self) -> io::Result<()> { - // Ignore styles for test captured logs because they can't be printed - if !self.has_uncolored_target { - self.inner.reset() - } else { - Ok(()) - } - } -} - -impl WriteStyle { - fn into_color_choice(self) -> ColorChoice { - match self { - WriteStyle::Always => ColorChoice::Always, - WriteStyle::Auto => ColorChoice::Auto, - WriteStyle::Never => ColorChoice::Never, - } - } -} - -/// A set of styles to apply to the terminal output. -/// -/// Call [`Formatter::style`] to get a `Style` and use the builder methods to -/// set styling properties, like [color] and [weight]. -/// To print a value using the style, wrap it in a call to [`value`] when the log -/// record is formatted. 
-/// -/// # Examples -/// -/// Create a bold, red colored style and use it to print the log level: -/// -/// ``` -/// use std::io::Write; -/// use env_logger::fmt::Color; -/// -/// let mut builder = env_logger::Builder::new(); -/// -/// builder.format(|buf, record| { -/// let mut level_style = buf.style(); -/// -/// level_style.set_color(Color::Red).set_bold(true); -/// -/// writeln!(buf, "{}: {}", -/// level_style.value(record.level()), -/// record.args()) -/// }); -/// ``` -/// -/// Styles can be re-used to output multiple values: -/// -/// ``` -/// use std::io::Write; -/// use env_logger::fmt::Color; -/// -/// let mut builder = env_logger::Builder::new(); -/// -/// builder.format(|buf, record| { -/// let mut bold = buf.style(); -/// -/// bold.set_bold(true); -/// -/// writeln!(buf, "{}: {} {}", -/// bold.value(record.level()), -/// bold.value("some bold text"), -/// record.args()) -/// }); -/// ``` -/// -/// [`Formatter::style`]: struct.Formatter.html#method.style -/// [color]: #method.set_color -/// [weight]: #method.set_bold -/// [`value`]: #method.value -#[derive(Clone)] -pub struct Style { - buf: Rc>, - spec: ColorSpec, -} - -/// A value that can be printed using the given styles. -/// -/// It is the result of calling [`Style::value`]. -/// -/// [`Style::value`]: struct.Style.html#method.value -pub struct StyledValue<'a, T> { - style: Cow<'a, Style>, - value: T, -} - -impl Style { - /// Set the text color. - /// - /// # Examples - /// - /// Create a style with red text: - /// - /// ``` - /// use std::io::Write; - /// use env_logger::fmt::Color; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_color(Color::Red); - /// - /// writeln!(buf, "{}", style.value(record.args())) - /// }); - /// ``` - pub fn set_color(&mut self, color: Color) -> &mut Style { - self.spec.set_fg(Some(color.into_termcolor())); - self - } - - /// Set the text weight. 
- /// - /// If `yes` is true then text will be written in bold. - /// If `yes` is false then text will be written in the default weight. - /// - /// # Examples - /// - /// Create a style with bold text: - /// - /// ``` - /// use std::io::Write; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_bold(true); - /// - /// writeln!(buf, "{}", style.value(record.args())) - /// }); - /// ``` - pub fn set_bold(&mut self, yes: bool) -> &mut Style { - self.spec.set_bold(yes); - self - } - - /// Set the text intensity. - /// - /// If `yes` is true then text will be written in a brighter color. - /// If `yes` is false then text will be written in the default color. - /// - /// # Examples - /// - /// Create a style with intense text: - /// - /// ``` - /// use std::io::Write; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_intense(true); - /// - /// writeln!(buf, "{}", style.value(record.args())) - /// }); - /// ``` - pub fn set_intense(&mut self, yes: bool) -> &mut Style { - self.spec.set_intense(yes); - self - } - - /// Set whether the text is dimmed. - /// - /// If `yes` is true then text will be written in a dimmer color. - /// If `yes` is false then text will be written in the default color. - /// - /// # Examples - /// - /// Create a style with dimmed text: - /// - /// ``` - /// use std::io::Write; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_dimmed(true); - /// - /// writeln!(buf, "{}", style.value(record.args())) - /// }); - /// ``` - pub fn set_dimmed(&mut self, yes: bool) -> &mut Style { - self.spec.set_dimmed(yes); - self - } - - /// Set the background color. 
- /// - /// # Examples - /// - /// Create a style with a yellow background: - /// - /// ``` - /// use std::io::Write; - /// use env_logger::fmt::Color; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_bg(Color::Yellow); - /// - /// writeln!(buf, "{}", style.value(record.args())) - /// }); - /// ``` - pub fn set_bg(&mut self, color: Color) -> &mut Style { - self.spec.set_bg(Some(color.into_termcolor())); - self - } - - /// Wrap a value in the style. - /// - /// The same `Style` can be used to print multiple different values. - /// - /// # Examples - /// - /// Create a bold, red colored style and use it to print the log level: - /// - /// ``` - /// use std::io::Write; - /// use env_logger::fmt::Color; - /// - /// let mut builder = env_logger::Builder::new(); - /// - /// builder.format(|buf, record| { - /// let mut style = buf.style(); - /// - /// style.set_color(Color::Red).set_bold(true); - /// - /// writeln!(buf, "{}: {}", - /// style.value(record.level()), - /// record.args()) - /// }); - /// ``` - pub fn value(&self, value: T) -> StyledValue { - StyledValue { - style: Cow::Borrowed(self), - value, - } - } - - /// Wrap a value in the style by taking ownership of it. 
- pub(crate) fn into_value(self, value: T) -> StyledValue<'static, T> { - StyledValue { - style: Cow::Owned(self), - value, - } - } -} - -impl<'a, T> StyledValue<'a, T> { - fn write_fmt(&self, f: F) -> fmt::Result - where - F: FnOnce() -> fmt::Result, - { - self.style - .buf - .borrow_mut() - .set_color(&self.style.spec) - .map_err(|_| fmt::Error)?; - - // Always try to reset the terminal style, even if writing failed - let write = f(); - let reset = self.style.buf.borrow_mut().reset().map_err(|_| fmt::Error); - - write.and(reset) - } -} - -impl fmt::Debug for Style { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Style").field("spec", &self.spec).finish() - } -} - -macro_rules! impl_styled_value_fmt { - ($($fmt_trait:path),*) => { - $( - impl<'a, T: $fmt_trait> $fmt_trait for StyledValue<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter)->fmt::Result { - self.write_fmt(|| T::fmt(&self.value, f)) - } - } - )* - }; -} - -impl_styled_value_fmt!( - fmt::Debug, - fmt::Display, - fmt::Pointer, - fmt::Octal, - fmt::Binary, - fmt::UpperHex, - fmt::LowerHex, - fmt::UpperExp, - fmt::LowerExp -); - -// The `Color` type is copied from https://github.com/BurntSushi/termcolor - -/// The set of available colors for the terminal foreground/background. -/// -/// The `Ansi256` and `Rgb` colors will only output the correct codes when -/// paired with the `Ansi` `WriteColor` implementation. -/// -/// The `Ansi256` and `Rgb` color types are not supported when writing colors -/// on Windows using the console. If they are used on Windows, then they are -/// silently ignored and no colors will be emitted. -/// -/// This set may expand over time. -/// -/// This type has a `FromStr` impl that can parse colors from their human -/// readable form. The format is as follows: -/// -/// 1. Any of the explicitly listed colors in English. They are matched -/// case insensitively. -/// 2. A single 8-bit integer, in either decimal or hexadecimal format. -/// 3. 
A triple of 8-bit integers separated by a comma, where each integer is -/// in decimal or hexadecimal format. -/// -/// Hexadecimal numbers are written with a `0x` prefix. -#[allow(missing_docs)] -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Color { - Black, - Blue, - Green, - Red, - Cyan, - Magenta, - Yellow, - White, - Ansi256(u8), - Rgb(u8, u8, u8), -} - -impl Color { - fn into_termcolor(self) -> termcolor::Color { - match self { - Color::Black => termcolor::Color::Black, - Color::Blue => termcolor::Color::Blue, - Color::Green => termcolor::Color::Green, - Color::Red => termcolor::Color::Red, - Color::Cyan => termcolor::Color::Cyan, - Color::Magenta => termcolor::Color::Magenta, - Color::Yellow => termcolor::Color::Yellow, - Color::White => termcolor::Color::White, - Color::Ansi256(value) => termcolor::Color::Ansi256(value), - Color::Rgb(r, g, b) => termcolor::Color::Rgb(r, g, b), - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/mod.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -/* -This internal module contains the style and terminal writing implementation. - -Its public API is available when the `termcolor` crate is available. -The terminal printing is shimmed when the `termcolor` crate is not available. 
-*/ - -#[cfg_attr(feature = "termcolor", path = "extern_impl.rs")] -#[cfg_attr(not(feature = "termcolor"), path = "shim_impl.rs")] -mod imp; - -pub(in crate::fmt) use self::imp::*; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/shim_impl.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/shim_impl.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/shim_impl.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/fmt/writer/termcolor/shim_impl.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -use std::{io, sync::Mutex}; - -use crate::fmt::{WritableTarget, WriteStyle}; - -pub(in crate::fmt::writer) mod glob {} - -pub(in crate::fmt::writer) struct BufferWriter { - target: WritableTarget, -} - -pub(in crate::fmt) struct Buffer(Vec); - -impl BufferWriter { - pub(in crate::fmt::writer) fn stderr(_is_test: bool, _write_style: WriteStyle) -> Self { - BufferWriter { - target: WritableTarget::Stderr, - } - } - - pub(in crate::fmt::writer) fn stdout(_is_test: bool, _write_style: WriteStyle) -> Self { - BufferWriter { - target: WritableTarget::Stdout, - } - } - - pub(in crate::fmt::writer) fn pipe( - _write_style: WriteStyle, - pipe: Box>, - ) -> Self { - BufferWriter { - target: WritableTarget::Pipe(pipe), - } - } - - pub(in crate::fmt::writer) fn buffer(&self) -> Buffer { - Buffer(Vec::new()) - } - - pub(in crate::fmt::writer) fn print(&self, buf: &Buffer) -> io::Result<()> { - // This impl uses the `eprint` and `print` macros - // instead of using the streams directly. - // This is so their output can be captured by `cargo test`. - match &self.target { - // Safety: If the target type is `Pipe`, `target_pipe` will always be non-empty. 
- WritableTarget::Pipe(pipe) => pipe.lock().unwrap().write_all(&buf.0)?, - WritableTarget::Stdout => print!("{}", String::from_utf8_lossy(&buf.0)), - WritableTarget::Stderr => eprint!("{}", String::from_utf8_lossy(&buf.0)), - } - - Ok(()) - } -} - -impl Buffer { - pub(in crate::fmt) fn clear(&mut self) { - self.0.clear(); - } - - pub(in crate::fmt) fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.extend(buf); - Ok(buf.len()) - } - - pub(in crate::fmt) fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - - #[cfg(test)] - pub(in crate::fmt) fn bytes(&self) -> &[u8] { - &self.0 - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/lib.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1311 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A simple logger that can be configured via environment variables, for use -//! with the logging facade exposed by the [`log` crate][log-crate-url]. -//! -//! Despite having "env" in its name, **`env_logger`** can also be configured by -//! other means besides environment variables. See [the examples][gh-repo-examples] -//! in the source repository for more approaches. -//! -//! By default, `env_logger` writes logs to `stderr`, but can be configured to -//! instead write them to `stdout`. -//! -//! ## Example -//! -//! ``` -//! use log::{debug, error, log_enabled, info, Level}; -//! -//! env_logger::init(); -//! -//! debug!("this is a debug {}", "message"); -//! error!("this is printed by default"); -//! -//! if log_enabled!(Level::Info) { -//! let x = 3 * 4; // expensive computation -//! 
info!("the answer was: {}", x); -//! } -//! ``` -//! -//! Assumes the binary is `main`: -//! -//! ```{.bash} -//! $ RUST_LOG=error ./main -//! [2017-11-09T02:12:24Z ERROR main] this is printed by default -//! ``` -//! -//! ```{.bash} -//! $ RUST_LOG=info ./main -//! [2017-11-09T02:12:24Z ERROR main] this is printed by default -//! [2017-11-09T02:12:24Z INFO main] the answer was: 12 -//! ``` -//! -//! ```{.bash} -//! $ RUST_LOG=debug ./main -//! [2017-11-09T02:12:24Z DEBUG main] this is a debug message -//! [2017-11-09T02:12:24Z ERROR main] this is printed by default -//! [2017-11-09T02:12:24Z INFO main] the answer was: 12 -//! ``` -//! -//! You can also set the log level on a per module basis: -//! -//! ```{.bash} -//! $ RUST_LOG=main=info ./main -//! [2017-11-09T02:12:24Z ERROR main] this is printed by default -//! [2017-11-09T02:12:24Z INFO main] the answer was: 12 -//! ``` -//! -//! And enable all logging: -//! -//! ```{.bash} -//! $ RUST_LOG=main ./main -//! [2017-11-09T02:12:24Z DEBUG main] this is a debug message -//! [2017-11-09T02:12:24Z ERROR main] this is printed by default -//! [2017-11-09T02:12:24Z INFO main] the answer was: 12 -//! ``` -//! -//! If the binary name contains hyphens, you will need to replace -//! them with underscores: -//! -//! ```{.bash} -//! $ RUST_LOG=my_app ./my-app -//! [2017-11-09T02:12:24Z DEBUG my_app] this is a debug message -//! [2017-11-09T02:12:24Z ERROR my_app] this is printed by default -//! [2017-11-09T02:12:24Z INFO my_app] the answer was: 12 -//! ``` -//! -//! This is because Rust modules and crates cannot contain hyphens -//! in their name, although `cargo` continues to accept them. -//! -//! See the documentation for the [`log` crate][log-crate-url] for more -//! information about its API. -//! -//! ## Enabling logging -//! -//! Log levels are controlled on a per-module basis, and **by default all -//! logging is disabled except for the `error` level**. -//! -//! 
Logging is controlled via the **`RUST_LOG`** environment variable. The -//! value of this environment variable is a comma-separated list of *logging -//! directives*. A logging directive is of the form: -//! -//! ```text -//! example::log::target=level -//! ``` -//! -//! The log target is typically equal to the path of the module the message -//! in question originated from, though it can be overriden. -//! -//! The path is rooted in the name of the crate it was compiled for, so if -//! your program is in a file called, for example, `hello.rs`, the path would -//! simply be be `hello`. -//! -//! Furthermore, the log can be filtered using prefix-search based on the -//! specified log target. A value of, for example, `RUST_LOG=example`, would -//! match all of the messages with targets: -//! -//! * `example` -//! * `example::test` -//! * `example::test::module::submodule` -//! * `examples::and_more_examples` -//! -//! When providing the crate name or a module path, explicitly specifying the -//! log level is optional. If omitted, all logging for the item will be -//! enabled. -//! -//! The names of the log levels that may be specified correspond to the -//! variations of the [`log::Level`][level-enum] enum from the `log` -//! crate. They are: -//! -//! * `error` -//! * `warn` -//! * `info` -//! * `debug` -//! * `trace` -//! -//! There is also a pseudo logging level, `off`, which may be specified to -//! disable all logging for a given module or for the entire application. As -//! with the logging levels, the letter case is not significant[^fn-off]. -//! -//! [^fn-off]: Similar to the universe of log level names, the `off` pseudo -//! log level feature is also provided by the underlying `log` crate. -//! -//! The letter case is not significant for the logging level names; e.g., -//! `debug`, `DEBUG`, and `dEbuG` all represent the same logging level. For -//! consistency, our convention is to use the lower case names. Where our docs -//! 
do use other forms, they do so in the context of specific examples, so you -//! won't be surprised if you see similar usage in the wild. -//! -//! As the log level for a module is optional, the module to enable logging for -//! is also optional. **If only a level is provided, then the global log -//! level for all modules is set to this value.** -//! -//! Some examples of valid values of `RUST_LOG` are: -//! -//! * `hello` turns on all logging for the 'hello' module -//! * `trace` turns on all logging for the application, regardless of its name -//! * `TRACE` turns on all logging for the application, regardless of its name (same as previous) -//! * `info` turns on all info logging -//! * `INFO` turns on all info logging (same as previous) -//! * `hello=debug` turns on debug logging for 'hello' -//! * `hello=DEBUG` turns on debug logging for 'hello' (same as previous) -//! * `hello,std::option` turns on hello, and std's option logging -//! * `error,hello=warn` turn on global error logging and also warn for hello -//! * `error,hello=off` turn on global error logging, but turn off logging for hello -//! * `off` turns off all logging for the application -//! * `OFF` turns off all logging for the application (same as previous) -//! -//! ## Filtering results -//! -//! A `RUST_LOG` directive may include a regex filter. The syntax is to append `/` -//! followed by a regex. Each message is checked against the regex, and is only -//! logged if it matches. Note that the matching is done after formatting the -//! log string but before adding any logging meta-data. There is a single filter -//! for all modules. -//! -//! Some examples: -//! -//! * `hello/foo` turns on all logging for the 'hello' module where the log -//! message includes 'foo'. -//! * `info/f.o` turns on all info logging where the log message includes 'foo', -//! 'f1o', 'fao', etc. -//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log -//! 
message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. -//! * `error,hello=warn/[0-9]scopes` turn on global error logging and also -//! warn for hello. In both cases the log message must include a single digit -//! number followed by 'scopes'. -//! -//! ## Capturing logs in tests -//! -//! Records logged during `cargo test` will not be captured by the test harness by default. -//! The [`Builder::is_test`] method can be used in unit tests to ensure logs will be captured: -//! -//! ``` -//! # #[macro_use] extern crate log; -//! #[cfg(test)] -//! mod tests { -//! fn init() { -//! let _ = env_logger::builder().is_test(true).try_init(); -//! } -//! -//! #[test] -//! fn it_works() { -//! init(); -//! -//! info!("This record will be captured by `cargo test`"); -//! -//! assert_eq!(2, 1 + 1); -//! } -//! } -//! ``` -//! -//! Enabling test capturing comes at the expense of color and other style support -//! and may have performance implications. -//! -//! ## Disabling colors -//! -//! Colors and other styles can be configured with the `RUST_LOG_STYLE` -//! environment variable. It accepts the following values: -//! -//! * `auto` (default) will attempt to print style characters, but don't force the issue. -//! If the console isn't available on Windows, or if TERM=dumb, for example, then don't print colors. -//! * `always` will always print style characters even if they aren't supported by the terminal. -//! This includes emitting ANSI colors on Windows if the console API is unavailable. -//! * `never` will never print style characters. -//! -//! ## Tweaking the default format -//! -//! Parts of the default format can be excluded from the log output using the [`Builder`]. -//! The following example excludes the timestamp from the log output: -//! -//! ``` -//! env_logger::builder() -//! .format_timestamp(None) -//! .init(); -//! ``` -//! -//! ### Stability of the default format -//! -//! 
The default format won't optimise for long-term stability, and explicitly makes no -//! guarantees about the stability of its output across major, minor or patch version -//! bumps during `0.x`. -//! -//! If you want to capture or interpret the output of `env_logger` programmatically -//! then you should use a custom format. -//! -//! ### Using a custom format -//! -//! Custom formats can be provided as closures to the [`Builder`]. -//! These closures take a [`Formatter`] and `log::Record` as arguments: -//! -//! ``` -//! use std::io::Write; -//! -//! env_logger::builder() -//! .format(|buf, record| { -//! writeln!(buf, "{}: {}", record.level(), record.args()) -//! }) -//! .init(); -//! ``` -//! -//! See the [`fmt`] module for more details about custom formats. -//! -//! ## Specifying defaults for environment variables -//! -//! `env_logger` can read configuration from environment variables. -//! If these variables aren't present, the default value to use can be tweaked with the [`Env`] type. -//! The following example defaults to log `warn` and above if the `RUST_LOG` environment variable -//! isn't set: -//! -//! ``` -//! use env_logger::Env; -//! -//! env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init(); -//! ``` -//! -//! [gh-repo-examples]: https://github.com/env-logger-rs/env_logger/tree/main/examples -//! [level-enum]: https://docs.rs/log/latest/log/enum.Level.html -//! [log-crate-url]: https://docs.rs/log/ -//! [`Builder`]: struct.Builder.html -//! [`Builder::is_test`]: struct.Builder.html#method.is_test -//! [`Env`]: struct.Env.html -//! 
[`fmt`]: fmt/index.html - -#![doc( - html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/static/images/favicon.ico" -)] -// When compiled for the rustc compiler itself we want to make sure that this is -// an unstable crate -#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] -#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] -#![deny(missing_debug_implementations, missing_docs)] - -use std::{borrow::Cow, cell::RefCell, env, io}; - -use log::{LevelFilter, Log, Metadata, Record, SetLoggerError}; - -pub mod filter; -pub mod fmt; - -pub use self::fmt::glob::*; - -use self::filter::Filter; -use self::fmt::writer::{self, Writer}; -use self::fmt::{FormatFn, Formatter}; - -/// The default name for the environment variable to read filters from. -pub const DEFAULT_FILTER_ENV: &str = "RUST_LOG"; - -/// The default name for the environment variable to read style preferences from. -pub const DEFAULT_WRITE_STYLE_ENV: &str = "RUST_LOG_STYLE"; - -/// Set of environment variables to configure from. -/// -/// # Default environment variables -/// -/// By default, the `Env` will read the following environment variables: -/// -/// - `RUST_LOG`: the level filter -/// - `RUST_LOG_STYLE`: whether or not to print styles with records. -/// -/// These sources can be configured using the builder methods on `Env`. -#[derive(Debug)] -pub struct Env<'a> { - filter: Var<'a>, - write_style: Var<'a>, -} - -#[derive(Debug)] -struct Var<'a> { - name: Cow<'a, str>, - default: Option>, -} - -/// The env logger. -/// -/// This struct implements the `Log` trait from the [`log` crate][log-crate-url], -/// which allows it to act as a logger. -/// -/// The [`init()`], [`try_init()`], [`Builder::init()`] and [`Builder::try_init()`] -/// methods will each construct a `Logger` and immediately initialize it as the -/// default global logger. 
-/// -/// If you'd instead need access to the constructed `Logger`, you can use -/// the associated [`Builder`] and install it with the -/// [`log` crate][log-crate-url] directly. -/// -/// [log-crate-url]: https://docs.rs/log/ -/// [`init()`]: fn.init.html -/// [`try_init()`]: fn.try_init.html -/// [`Builder::init()`]: struct.Builder.html#method.init -/// [`Builder::try_init()`]: struct.Builder.html#method.try_init -/// [`Builder`]: struct.Builder.html -pub struct Logger { - writer: Writer, - filter: Filter, - format: FormatFn, -} - -/// `Builder` acts as builder for initializing a `Logger`. -/// -/// It can be used to customize the log format, change the environment variable used -/// to provide the logging directives and also set the default log level filter. -/// -/// # Examples -/// -/// ``` -/// # #[macro_use] extern crate log; -/// # use std::io::Write; -/// use env_logger::Builder; -/// use log::LevelFilter; -/// -/// let mut builder = Builder::from_default_env(); -/// -/// builder -/// .format(|buf, record| writeln!(buf, "{} - {}", record.level(), record.args())) -/// .filter(None, LevelFilter::Info) -/// .init(); -/// -/// error!("error message"); -/// info!("info message"); -/// ``` -#[derive(Default)] -pub struct Builder { - filter: filter::Builder, - writer: writer::Builder, - format: fmt::Builder, - built: bool, -} - -impl Builder { - /// Initializes the log builder with defaults. - /// - /// **NOTE:** This method won't read from any environment variables. - /// Use the [`filter`] and [`write_style`] methods to configure the builder - /// or use [`from_env`] or [`from_default_env`] instead. 
- /// - /// # Examples - /// - /// Create a new builder and configure filters and style: - /// - /// ``` - /// use log::LevelFilter; - /// use env_logger::{Builder, WriteStyle}; - /// - /// let mut builder = Builder::new(); - /// - /// builder - /// .filter(None, LevelFilter::Info) - /// .write_style(WriteStyle::Always) - /// .init(); - /// ``` - /// - /// [`filter`]: #method.filter - /// [`write_style`]: #method.write_style - /// [`from_env`]: #method.from_env - /// [`from_default_env`]: #method.from_default_env - pub fn new() -> Builder { - Default::default() - } - - /// Initializes the log builder from the environment. - /// - /// The variables used to read configuration from can be tweaked before - /// passing in. - /// - /// # Examples - /// - /// Initialise a logger reading the log filter from an environment variable - /// called `MY_LOG`: - /// - /// ``` - /// use env_logger::Builder; - /// - /// let mut builder = Builder::from_env("MY_LOG"); - /// builder.init(); - /// ``` - /// - /// Initialise a logger using the `MY_LOG` variable for filtering and - /// `MY_LOG_STYLE` for whether or not to write styles: - /// - /// ``` - /// use env_logger::{Builder, Env}; - /// - /// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); - /// - /// let mut builder = Builder::from_env(env); - /// builder.init(); - /// ``` - pub fn from_env<'a, E>(env: E) -> Self - where - E: Into>, - { - let mut builder = Builder::new(); - builder.parse_env(env); - builder - } - - /// Applies the configuration from the environment. - /// - /// This function allows a builder to be configured with default parameters, - /// to be then overridden by the environment. 
- /// - /// # Examples - /// - /// Initialise a logger with filter level `Off`, then override the log - /// filter from an environment variable called `MY_LOG`: - /// - /// ``` - /// use log::LevelFilter; - /// use env_logger::Builder; - /// - /// let mut builder = Builder::new(); - /// - /// builder.filter_level(LevelFilter::Off); - /// builder.parse_env("MY_LOG"); - /// builder.init(); - /// ``` - /// - /// Initialise a logger with filter level `Off`, then use the `MY_LOG` - /// variable to override filtering and `MY_LOG_STYLE` to override whether - /// or not to write styles: - /// - /// ``` - /// use log::LevelFilter; - /// use env_logger::{Builder, Env}; - /// - /// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); - /// - /// let mut builder = Builder::new(); - /// builder.filter_level(LevelFilter::Off); - /// builder.parse_env(env); - /// builder.init(); - /// ``` - pub fn parse_env<'a, E>(&mut self, env: E) -> &mut Self - where - E: Into>, - { - let env = env.into(); - - if let Some(s) = env.get_filter() { - self.parse_filters(&s); - } - - if let Some(s) = env.get_write_style() { - self.parse_write_style(&s); - } - - self - } - - /// Initializes the log builder from the environment using default variable names. - /// - /// This method is a convenient way to call `from_env(Env::default())` without - /// having to use the `Env` type explicitly. The builder will use the - /// [default environment variables]. - /// - /// # Examples - /// - /// Initialise a logger using the default environment variables: - /// - /// ``` - /// use env_logger::Builder; - /// - /// let mut builder = Builder::from_default_env(); - /// builder.init(); - /// ``` - /// - /// [default environment variables]: struct.Env.html#default-environment-variables - pub fn from_default_env() -> Self { - Self::from_env(Env::default()) - } - - /// Applies the configuration from the environment using default variable names. 
- /// - /// This method is a convenient way to call `parse_env(Env::default())` without - /// having to use the `Env` type explicitly. The builder will use the - /// [default environment variables]. - /// - /// # Examples - /// - /// Initialise a logger with filter level `Off`, then configure it using the - /// default environment variables: - /// - /// ``` - /// use log::LevelFilter; - /// use env_logger::Builder; - /// - /// let mut builder = Builder::new(); - /// builder.filter_level(LevelFilter::Off); - /// builder.parse_default_env(); - /// builder.init(); - /// ``` - /// - /// [default environment variables]: struct.Env.html#default-environment-variables - pub fn parse_default_env(&mut self) -> &mut Self { - self.parse_env(Env::default()) - } - - /// Sets the format function for formatting the log output. - /// - /// This function is called on each record logged and should format the - /// log record and output it to the given [`Formatter`]. - /// - /// The format function is expected to output the string directly to the - /// `Formatter` so that implementations can use the [`std::fmt`] macros - /// to format and output without intermediate heap allocations. The default - /// `env_logger` formatter takes advantage of this. - /// - /// # Examples - /// - /// Use a custom format to write only the log message: - /// - /// ``` - /// use std::io::Write; - /// use env_logger::Builder; - /// - /// let mut builder = Builder::new(); - /// - /// builder.format(|buf, record| writeln!(buf, "{}", record.args())); - /// ``` - /// - /// [`Formatter`]: fmt/struct.Formatter.html - /// [`String`]: https://doc.rust-lang.org/stable/std/string/struct.String.html - /// [`std::fmt`]: https://doc.rust-lang.org/std/fmt/index.html - pub fn format(&mut self, format: F) -> &mut Self - where - F: Fn(&mut Formatter, &Record) -> io::Result<()> + Sync + Send, - { - self.format.custom_format = Some(Box::new(format)); - self - } - - /// Use the default format. 
- /// - /// This method will clear any custom format set on the builder. - pub fn default_format(&mut self) -> &mut Self { - self.format = Default::default(); - self - } - - /// Whether or not to write the level in the default format. - pub fn format_level(&mut self, write: bool) -> &mut Self { - self.format.format_level = write; - self - } - - /// Whether or not to write the module path in the default format. - pub fn format_module_path(&mut self, write: bool) -> &mut Self { - self.format.format_module_path = write; - self - } - - /// Whether or not to write the target in the default format. - pub fn format_target(&mut self, write: bool) -> &mut Self { - self.format.format_target = write; - self - } - - /// Configures the amount of spaces to use to indent multiline log records. - /// A value of `None` disables any kind of indentation. - pub fn format_indent(&mut self, indent: Option) -> &mut Self { - self.format.format_indent = indent; - self - } - - /// Configures if timestamp should be included and in what precision. - pub fn format_timestamp(&mut self, timestamp: Option) -> &mut Self { - self.format.format_timestamp = timestamp; - self - } - - /// Configures the timestamp to use second precision. - pub fn format_timestamp_secs(&mut self) -> &mut Self { - self.format_timestamp(Some(fmt::TimestampPrecision::Seconds)) - } - - /// Configures the timestamp to use millisecond precision. - pub fn format_timestamp_millis(&mut self) -> &mut Self { - self.format_timestamp(Some(fmt::TimestampPrecision::Millis)) - } - - /// Configures the timestamp to use microsecond precision. - pub fn format_timestamp_micros(&mut self) -> &mut Self { - self.format_timestamp(Some(fmt::TimestampPrecision::Micros)) - } - - /// Configures the timestamp to use nanosecond precision. - pub fn format_timestamp_nanos(&mut self) -> &mut Self { - self.format_timestamp(Some(fmt::TimestampPrecision::Nanos)) - } - - /// Configures the end of line suffix. 
- pub fn format_suffix(&mut self, suffix: &'static str) -> &mut Self { - self.format.format_suffix = suffix; - self - } - - /// Adds a directive to the filter for a specific module. - /// - /// # Examples - /// - /// Only include messages for info and above for logs in `path::to::module`: - /// - /// ``` - /// use env_logger::Builder; - /// use log::LevelFilter; - /// - /// let mut builder = Builder::new(); - /// - /// builder.filter_module("path::to::module", LevelFilter::Info); - /// ``` - pub fn filter_module(&mut self, module: &str, level: LevelFilter) -> &mut Self { - self.filter.filter_module(module, level); - self - } - - /// Adds a directive to the filter for all modules. - /// - /// # Examples - /// - /// Only include messages for info and above for logs globally: - /// - /// ``` - /// use env_logger::Builder; - /// use log::LevelFilter; - /// - /// let mut builder = Builder::new(); - /// - /// builder.filter_level(LevelFilter::Info); - /// ``` - pub fn filter_level(&mut self, level: LevelFilter) -> &mut Self { - self.filter.filter_level(level); - self - } - - /// Adds filters to the logger. - /// - /// The given module (if any) will log at most the specified level provided. - /// If no module is provided then the filter will apply to all log messages. - /// - /// # Examples - /// - /// Only include messages for info and above for logs in `path::to::module`: - /// - /// ``` - /// use env_logger::Builder; - /// use log::LevelFilter; - /// - /// let mut builder = Builder::new(); - /// - /// builder.filter(Some("path::to::module"), LevelFilter::Info); - /// ``` - pub fn filter(&mut self, module: Option<&str>, level: LevelFilter) -> &mut Self { - self.filter.filter(module, level); - self - } - - /// Parses the directives string in the same form as the `RUST_LOG` - /// environment variable. - /// - /// See the module documentation for more details. 
- pub fn parse_filters(&mut self, filters: &str) -> &mut Self { - self.filter.parse(filters); - self - } - - /// Sets the target for the log output. - /// - /// Env logger can log to either stdout, stderr or a custom pipe. The default is stderr. - /// - /// The custom pipe can be used to send the log messages to a custom sink (for example a file). - /// Do note that direct writes to a file can become a bottleneck due to IO operation times. - /// - /// # Examples - /// - /// Write log message to `stdout`: - /// - /// ``` - /// use env_logger::{Builder, Target}; - /// - /// let mut builder = Builder::new(); - /// - /// builder.target(Target::Stdout); - /// ``` - pub fn target(&mut self, target: fmt::Target) -> &mut Self { - self.writer.target(target); - self - } - - /// Sets whether or not styles will be written. - /// - /// This can be useful in environments that don't support control characters - /// for setting colors. - /// - /// # Examples - /// - /// Never attempt to write styles: - /// - /// ``` - /// use env_logger::{Builder, WriteStyle}; - /// - /// let mut builder = Builder::new(); - /// - /// builder.write_style(WriteStyle::Never); - /// ``` - pub fn write_style(&mut self, write_style: fmt::WriteStyle) -> &mut Self { - self.writer.write_style(write_style); - self - } - - /// Parses whether or not to write styles in the same form as the `RUST_LOG_STYLE` - /// environment variable. - /// - /// See the module documentation for more details. - pub fn parse_write_style(&mut self, write_style: &str) -> &mut Self { - self.writer.parse_write_style(write_style); - self - } - - /// Sets whether or not the logger will be used in unit tests. - /// - /// If `is_test` is `true` then the logger will allow the testing framework to - /// capture log records rather than printing them to the terminal directly. - pub fn is_test(&mut self, is_test: bool) -> &mut Self { - self.writer.is_test(is_test); - self - } - - /// Initializes the global logger with the built env logger. 
- /// - /// This should be called early in the execution of a Rust program. Any log - /// events that occur before initialization will be ignored. - /// - /// # Errors - /// - /// This function will fail if it is called more than once, or if another - /// library has already initialized a global logger. - pub fn try_init(&mut self) -> Result<(), SetLoggerError> { - let logger = self.build(); - - let max_level = logger.filter(); - let r = log::set_boxed_logger(Box::new(logger)); - - if r.is_ok() { - log::set_max_level(max_level); - } - - r - } - - /// Initializes the global logger with the built env logger. - /// - /// This should be called early in the execution of a Rust program. Any log - /// events that occur before initialization will be ignored. - /// - /// # Panics - /// - /// This function will panic if it is called more than once, or if another - /// library has already initialized a global logger. - pub fn init(&mut self) { - self.try_init() - .expect("Builder::init should not be called after logger initialized"); - } - - /// Build an env logger. - /// - /// The returned logger implements the `Log` trait and can be installed manually - /// or nested within another logger. - pub fn build(&mut self) -> Logger { - assert!(!self.built, "attempt to re-use consumed builder"); - self.built = true; - - Logger { - writer: self.writer.build(), - filter: self.filter.build(), - format: self.format.build(), - } - } -} - -impl Logger { - /// Creates the logger from the environment. - /// - /// The variables used to read configuration from can be tweaked before - /// passing in. 
- /// - /// # Examples - /// - /// Create a logger reading the log filter from an environment variable - /// called `MY_LOG`: - /// - /// ``` - /// use env_logger::Logger; - /// - /// let logger = Logger::from_env("MY_LOG"); - /// ``` - /// - /// Create a logger using the `MY_LOG` variable for filtering and - /// `MY_LOG_STYLE` for whether or not to write styles: - /// - /// ``` - /// use env_logger::{Logger, Env}; - /// - /// let env = Env::new().filter_or("MY_LOG", "info").write_style_or("MY_LOG_STYLE", "always"); - /// - /// let logger = Logger::from_env(env); - /// ``` - pub fn from_env<'a, E>(env: E) -> Self - where - E: Into>, - { - Builder::from_env(env).build() - } - - /// Creates the logger from the environment using default variable names. - /// - /// This method is a convenient way to call `from_env(Env::default())` without - /// having to use the `Env` type explicitly. The logger will use the - /// [default environment variables]. - /// - /// # Examples - /// - /// Creates a logger using the default environment variables: - /// - /// ``` - /// use env_logger::Logger; - /// - /// let logger = Logger::from_default_env(); - /// ``` - /// - /// [default environment variables]: struct.Env.html#default-environment-variables - pub fn from_default_env() -> Self { - Builder::from_default_env().build() - } - - /// Returns the maximum `LevelFilter` that this env logger instance is - /// configured to output. - pub fn filter(&self) -> LevelFilter { - self.filter.filter() - } - - /// Checks if this record matches the configured filter. - pub fn matches(&self, record: &Record) -> bool { - self.filter.matches(record) - } -} - -impl Log for Logger { - fn enabled(&self, metadata: &Metadata) -> bool { - self.filter.enabled(metadata) - } - - fn log(&self, record: &Record) { - if self.matches(record) { - // Log records are written to a thread-local buffer before being printed - // to the terminal. 
We clear these buffers afterwards, but they aren't shrinked - // so will always at least have capacity for the largest log record formatted - // on that thread. - // - // If multiple `Logger`s are used by the same threads then the thread-local - // formatter might have different color support. If this is the case the - // formatter and its buffer are discarded and recreated. - - thread_local! { - static FORMATTER: RefCell> = RefCell::new(None); - } - - let print = |formatter: &mut Formatter, record: &Record| { - let _ = - (self.format)(formatter, record).and_then(|_| formatter.print(&self.writer)); - - // Always clear the buffer afterwards - formatter.clear(); - }; - - let printed = FORMATTER - .try_with(|tl_buf| { - match tl_buf.try_borrow_mut() { - // There are no active borrows of the buffer - Ok(mut tl_buf) => match *tl_buf { - // We have a previously set formatter - Some(ref mut formatter) => { - // Check the buffer style. If it's different from the logger's - // style then drop the buffer and recreate it. - if formatter.write_style() != self.writer.write_style() { - *formatter = Formatter::new(&self.writer); - } - - print(formatter, record); - } - // We don't have a previously set formatter - None => { - let mut formatter = Formatter::new(&self.writer); - print(&mut formatter, record); - - *tl_buf = Some(formatter); - } - }, - // There's already an active borrow of the buffer (due to re-entrancy) - Err(_) => { - print(&mut Formatter::new(&self.writer), record); - } - } - }) - .is_ok(); - - if !printed { - // The thread-local storage was not available (because its - // destructor has already run). Create a new single-use - // Formatter on the stack for this call. - print(&mut Formatter::new(&self.writer), record); - } - } - } - - fn flush(&self) {} -} - -impl<'a> Env<'a> { - /// Get a default set of environment variables. - pub fn new() -> Self { - Self::default() - } - - /// Specify an environment variable to read the filter from. 
- pub fn filter(mut self, filter_env: E) -> Self - where - E: Into>, - { - self.filter = Var::new(filter_env); - - self - } - - /// Specify an environment variable to read the filter from. - /// - /// If the variable is not set, the default value will be used. - pub fn filter_or(mut self, filter_env: E, default: V) -> Self - where - E: Into>, - V: Into>, - { - self.filter = Var::new_with_default(filter_env, default); - - self - } - - /// Use the default environment variable to read the filter from. - /// - /// If the variable is not set, the default value will be used. - pub fn default_filter_or(mut self, default: V) -> Self - where - V: Into>, - { - self.filter = Var::new_with_default(DEFAULT_FILTER_ENV, default); - - self - } - - fn get_filter(&self) -> Option { - self.filter.get() - } - - /// Specify an environment variable to read the style from. - pub fn write_style(mut self, write_style_env: E) -> Self - where - E: Into>, - { - self.write_style = Var::new(write_style_env); - - self - } - - /// Specify an environment variable to read the style from. - /// - /// If the variable is not set, the default value will be used. - pub fn write_style_or(mut self, write_style_env: E, default: V) -> Self - where - E: Into>, - V: Into>, - { - self.write_style = Var::new_with_default(write_style_env, default); - - self - } - - /// Use the default environment variable to read the style from. - /// - /// If the variable is not set, the default value will be used. 
- pub fn default_write_style_or(mut self, default: V) -> Self - where - V: Into>, - { - self.write_style = Var::new_with_default(DEFAULT_WRITE_STYLE_ENV, default); - - self - } - - fn get_write_style(&self) -> Option { - self.write_style.get() - } -} - -impl<'a> Var<'a> { - fn new(name: E) -> Self - where - E: Into>, - { - Var { - name: name.into(), - default: None, - } - } - - fn new_with_default(name: E, default: V) -> Self - where - E: Into>, - V: Into>, - { - Var { - name: name.into(), - default: Some(default.into()), - } - } - - fn get(&self) -> Option { - env::var(&*self.name) - .ok() - .or_else(|| self.default.to_owned().map(|v| v.into_owned())) - } -} - -impl<'a, T> From for Env<'a> -where - T: Into>, -{ - fn from(filter_env: T) -> Self { - Env::default().filter(filter_env.into()) - } -} - -impl<'a> Default for Env<'a> { - fn default() -> Self { - Env { - filter: Var::new(DEFAULT_FILTER_ENV), - write_style: Var::new(DEFAULT_WRITE_STYLE_ENV), - } - } -} - -mod std_fmt_impls { - use super::*; - use std::fmt; - - impl fmt::Debug for Logger { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Logger") - .field("filter", &self.filter) - .finish() - } - } - - impl fmt::Debug for Builder { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.built { - f.debug_struct("Logger").field("built", &true).finish() - } else { - f.debug_struct("Logger") - .field("filter", &self.filter) - .field("writer", &self.writer) - .finish() - } - } - } -} - -/// Attempts to initialize the global logger with an env logger. -/// -/// This should be called early in the execution of a Rust program. Any log -/// events that occur before initialization will be ignored. -/// -/// # Errors -/// -/// This function will fail if it is called more than once, or if another -/// library has already initialized a global logger. 
-pub fn try_init() -> Result<(), SetLoggerError> { - try_init_from_env(Env::default()) -} - -/// Initializes the global logger with an env logger. -/// -/// This should be called early in the execution of a Rust program. Any log -/// events that occur before initialization will be ignored. -/// -/// # Panics -/// -/// This function will panic if it is called more than once, or if another -/// library has already initialized a global logger. -pub fn init() { - try_init().expect("env_logger::init should not be called after logger initialized"); -} - -/// Attempts to initialize the global logger with an env logger from the given -/// environment variables. -/// -/// This should be called early in the execution of a Rust program. Any log -/// events that occur before initialization will be ignored. -/// -/// # Examples -/// -/// Initialise a logger using the `MY_LOG` environment variable for filters -/// and `MY_LOG_STYLE` for writing colors: -/// -/// ``` -/// use env_logger::{Builder, Env}; -/// -/// # fn run() -> Result<(), Box<::std::error::Error>> { -/// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); -/// -/// env_logger::try_init_from_env(env)?; -/// -/// Ok(()) -/// # } -/// # run().unwrap(); -/// ``` -/// -/// # Errors -/// -/// This function will fail if it is called more than once, or if another -/// library has already initialized a global logger. -pub fn try_init_from_env<'a, E>(env: E) -> Result<(), SetLoggerError> -where - E: Into>, -{ - let mut builder = Builder::from_env(env); - - builder.try_init() -} - -/// Initializes the global logger with an env logger from the given environment -/// variables. -/// -/// This should be called early in the execution of a Rust program. Any log -/// events that occur before initialization will be ignored. 
-/// -/// # Examples -/// -/// Initialise a logger using the `MY_LOG` environment variable for filters -/// and `MY_LOG_STYLE` for writing colors: -/// -/// ``` -/// use env_logger::{Builder, Env}; -/// -/// let env = Env::new().filter("MY_LOG").write_style("MY_LOG_STYLE"); -/// -/// env_logger::init_from_env(env); -/// ``` -/// -/// # Panics -/// -/// This function will panic if it is called more than once, or if another -/// library has already initialized a global logger. -pub fn init_from_env<'a, E>(env: E) -where - E: Into>, -{ - try_init_from_env(env) - .expect("env_logger::init_from_env should not be called after logger initialized"); -} - -/// Create a new builder with the default environment variables. -/// -/// The builder can be configured before being initialized. -/// This is a convenient way of calling [`Builder::from_default_env`]. -/// -/// [`Builder::from_default_env`]: struct.Builder.html#method.from_default_env -pub fn builder() -> Builder { - Builder::from_default_env() -} - -/// Create a builder from the given environment variables. -/// -/// The builder can be configured before being initialized. -#[deprecated( - since = "0.8.0", - note = "Prefer `env_logger::Builder::from_env()` instead." 
-)] -pub fn from_env<'a, E>(env: E) -> Builder -where - E: Into>, -{ - Builder::from_env(env) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn env_get_filter_reads_from_var_if_set() { - env::set_var("env_get_filter_reads_from_var_if_set", "from var"); - - let env = Env::new().filter_or("env_get_filter_reads_from_var_if_set", "from default"); - - assert_eq!(Some("from var".to_owned()), env.get_filter()); - } - - #[test] - fn env_get_filter_reads_from_default_if_var_not_set() { - env::remove_var("env_get_filter_reads_from_default_if_var_not_set"); - - let env = Env::new().filter_or( - "env_get_filter_reads_from_default_if_var_not_set", - "from default", - ); - - assert_eq!(Some("from default".to_owned()), env.get_filter()); - } - - #[test] - fn env_get_write_style_reads_from_var_if_set() { - env::set_var("env_get_write_style_reads_from_var_if_set", "from var"); - - let env = - Env::new().write_style_or("env_get_write_style_reads_from_var_if_set", "from default"); - - assert_eq!(Some("from var".to_owned()), env.get_write_style()); - } - - #[test] - fn env_get_write_style_reads_from_default_if_var_not_set() { - env::remove_var("env_get_write_style_reads_from_default_if_var_not_set"); - - let env = Env::new().write_style_or( - "env_get_write_style_reads_from_default_if_var_not_set", - "from default", - ); - - assert_eq!(Some("from default".to_owned()), env.get_write_style()); - } - - #[test] - fn builder_parse_env_overrides_existing_filters() { - env::set_var( - "builder_parse_default_env_overrides_existing_filters", - "debug", - ); - let env = Env::new().filter("builder_parse_default_env_overrides_existing_filters"); - - let mut builder = Builder::new(); - builder.filter_level(LevelFilter::Trace); - // Overrides global level to debug - builder.parse_env(env); - - assert_eq!(builder.filter.build().filter(), LevelFilter::Debug); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/init-twice-retains-filter.rs 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/init-twice-retains-filter.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/init-twice-retains-filter.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/init-twice-retains-filter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -extern crate env_logger; -extern crate log; - -use std::env; -use std::process; -use std::str; - -fn main() { - if env::var("YOU_ARE_TESTING_NOW").is_ok() { - // Init from the env (which should set the max level to `Debug`) - env_logger::init(); - - assert_eq!(log::LevelFilter::Debug, log::max_level()); - - // Init again using a different max level - // This shouldn't clobber the level that was previously set - env_logger::Builder::new() - .parse_filters("info") - .try_init() - .unwrap_err(); - - assert_eq!(log::LevelFilter::Debug, log::max_level()); - return; - } - - let exe = env::current_exe().unwrap(); - let out = process::Command::new(exe) - .env("YOU_ARE_TESTING_NOW", "1") - .env("RUST_LOG", "debug") - .output() - .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); - if out.status.success() { - return; - } - - println!("test failed: {}", out.status); - println!("--- stdout\n{}", str::from_utf8(&out.stdout).unwrap()); - println!("--- stderr\n{}", str::from_utf8(&out.stderr).unwrap()); - process::exit(1); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log-in-log.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log-in-log.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log-in-log.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log-in-log.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -#[macro_use] -extern crate log; -extern crate env_logger; - -use std::env; -use std::fmt; -use std::process; -use std::str; - -struct Foo; - -impl 
fmt::Display for Foo { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - info!("test"); - f.write_str("bar") - } -} - -fn main() { - env_logger::init(); - if env::var("YOU_ARE_TESTING_NOW").is_ok() { - return info!("{}", Foo); - } - - let exe = env::current_exe().unwrap(); - let out = process::Command::new(exe) - .env("YOU_ARE_TESTING_NOW", "1") - .env("RUST_LOG", "debug") - .output() - .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); - if out.status.success() { - return; - } - - println!("test failed: {}", out.status); - println!("--- stdout\n{}", str::from_utf8(&out.stdout).unwrap()); - println!("--- stderr\n{}", str::from_utf8(&out.stderr).unwrap()); - process::exit(1); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log_tls_dtors.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log_tls_dtors.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log_tls_dtors.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/log_tls_dtors.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -#[macro_use] -extern crate log; -extern crate env_logger; - -use std::env; -use std::process; -use std::str; -use std::thread; - -struct DropMe; - -impl Drop for DropMe { - fn drop(&mut self) { - debug!("Dropping now"); - } -} - -fn run() { - // Use multiple thread local values to increase the chance that our TLS - // value will get destroyed after the FORMATTER key in the library - thread_local! 
{ - static DROP_ME_0: DropMe = DropMe; - static DROP_ME_1: DropMe = DropMe; - static DROP_ME_2: DropMe = DropMe; - static DROP_ME_3: DropMe = DropMe; - static DROP_ME_4: DropMe = DropMe; - static DROP_ME_5: DropMe = DropMe; - static DROP_ME_6: DropMe = DropMe; - static DROP_ME_7: DropMe = DropMe; - static DROP_ME_8: DropMe = DropMe; - static DROP_ME_9: DropMe = DropMe; - } - DROP_ME_0.with(|_| {}); - DROP_ME_1.with(|_| {}); - DROP_ME_2.with(|_| {}); - DROP_ME_3.with(|_| {}); - DROP_ME_4.with(|_| {}); - DROP_ME_5.with(|_| {}); - DROP_ME_6.with(|_| {}); - DROP_ME_7.with(|_| {}); - DROP_ME_8.with(|_| {}); - DROP_ME_9.with(|_| {}); -} - -fn main() { - env_logger::init(); - if env::var("YOU_ARE_TESTING_NOW").is_ok() { - // Run on a separate thread because TLS values on the main thread - // won't have their destructors run if pthread is used. - // https://doc.rust-lang.org/std/thread/struct.LocalKey.html#platform-specific-behavior - thread::spawn(run).join().unwrap(); - } else { - let exe = env::current_exe().unwrap(); - let out = process::Command::new(exe) - .env("YOU_ARE_TESTING_NOW", "1") - .env("RUST_LOG", "debug") - .output() - .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); - if !out.status.success() { - println!("test failed: {}", out.status); - println!("--- stdout\n{}", str::from_utf8(&out.stdout).unwrap()); - println!("--- stderr\n{}", str::from_utf8(&out.stderr).unwrap()); - process::exit(1); - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/regexp_filter.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/regexp_filter.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/regexp_filter.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/env_logger/tests/regexp_filter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -#[macro_use] -extern crate log; -extern crate env_logger; - -use std::env; -use std::process; -use std::str; - -fn 
main() { - if env::var("LOG_REGEXP_TEST").ok() == Some(String::from("1")) { - child_main(); - } else { - parent_main() - } -} - -fn child_main() { - env_logger::init(); - info!("XYZ Message"); -} - -fn run_child(rust_log: String) -> bool { - let exe = env::current_exe().unwrap(); - let out = process::Command::new(exe) - .env("LOG_REGEXP_TEST", "1") - .env("RUST_LOG", rust_log) - .output() - .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); - str::from_utf8(out.stderr.as_ref()) - .unwrap() - .contains("XYZ Message") -} - -fn assert_message_printed(rust_log: &str) { - if !run_child(rust_log.to_string()) { - panic!("RUST_LOG={} should allow the test log message", rust_log) - } -} - -fn assert_message_not_printed(rust_log: &str) { - if run_child(rust_log.to_string()) { - panic!( - "RUST_LOG={} should not allow the test log message", - rust_log - ) - } -} - -fn parent_main() { - // test normal log severity levels - assert_message_printed("info"); - assert_message_not_printed("warn"); - - // test of regular expression filters - assert_message_printed("info/XYZ"); - assert_message_not_printed("info/XXX"); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_format.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_format.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_format.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_format.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -#![feature(test)] -extern crate test; - -use std::io::Write; -use std::time::{Duration, UNIX_EPOCH}; - -use humantime::format_rfc3339; - -#[bench] -fn rfc3339_humantime_seconds(b: &mut test::Bencher) { - let time = UNIX_EPOCH + Duration::new(1_483_228_799, 0); - let mut buf = Vec::with_capacity(100); - b.iter(|| { - buf.truncate(0); - write!(&mut buf, "{}", format_rfc3339(time)).unwrap() - }); -} - -#[bench] -fn 
rfc3339_chrono(b: &mut test::Bencher) { - use chrono::{DateTime, NaiveDateTime, Utc}; - use chrono::format::Item; - use chrono::format::Item::*; - use chrono::format::Numeric::*; - use chrono::format::Fixed::*; - use chrono::format::Pad::*; - - let time = DateTime::::from_utc( - NaiveDateTime::from_timestamp(1_483_228_799, 0), Utc); - let mut buf = Vec::with_capacity(100); - - // formatting code from env_logger - const ITEMS: &[Item<'static>] = { - &[ - Numeric(Year, Zero), - Literal("-"), - Numeric(Month, Zero), - Literal("-"), - Numeric(Day, Zero), - Literal("T"), - Numeric(Hour, Zero), - Literal(":"), - Numeric(Minute, Zero), - Literal(":"), - Numeric(Second, Zero), - Fixed(TimezoneOffsetZ), - ] - }; - - - b.iter(|| { - buf.truncate(0); - write!(&mut buf, "{}", time.format_with_items(ITEMS.iter().cloned())) - .unwrap() - }); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_parse.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_parse.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_parse.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/benches/datetime_parse.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -#![feature(test)] -extern crate test; - -use chrono::{DateTime}; -use humantime::parse_rfc3339; - -#[bench] -fn rfc3339_humantime_seconds(b: &mut test::Bencher) { - b.iter(|| { - parse_rfc3339("2018-02-13T23:08:32Z").unwrap() - }); -} - -#[bench] -fn datetime_utc_parse_seconds(b: &mut test::Bencher) { - b.iter(|| { - DateTime::parse_from_rfc3339("2018-02-13T23:08:32Z").unwrap() - }); -} - -#[bench] -fn rfc3339_humantime_millis(b: &mut test::Bencher) { - b.iter(|| { - parse_rfc3339("2018-02-13T23:08:32.123Z").unwrap() - }); -} - -#[bench] -fn datetime_utc_parse_millis(b: &mut test::Bencher) { - b.iter(|| { - DateTime::parse_from_rfc3339("2018-02-13T23:08:32.123Z").unwrap() - }); -} - -#[bench] -fn 
rfc3339_humantime_nanos(b: &mut test::Bencher) { - b.iter(|| { - parse_rfc3339("2018-02-13T23:08:32.123456983Z").unwrap() - }); -} - -#[bench] -fn datetime_utc_parse_nanos(b: &mut test::Bencher) { - b.iter(|| { - DateTime::parse_from_rfc3339("2018-02-13T23:08:32.123456983Z").unwrap() - }); -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/bulk.yaml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/bulk.yaml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/bulk.yaml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/bulk.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -minimum-bulk: v0.4.5 - -versions: - -- file: Cargo.toml - block-start: ^\[package\] - block-end: ^\[.*\] - regex: ^version\s*=\s*"(\S+)" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/.cargo-checksum.json 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{"Cargo.toml":"c0d1443ae237dee3c09cb70185fa947d8d8cb660acfbcb8f650798bd4e0c019e","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"f6deca8261a8f4a3403dc74c725c46051157fd36c27cd4b100277eb1f303ad11","README.md":"e4bb65f28ddffb11d7eb337e9585947651f2fc11a5e4290f0ca126e21c582c1e","benches/datetime_format.rs":"ffe2e459e9b48e8fdbfb3686f6297257d66b29369ecd6750ae9fbba527ccc681","benches/datetime_parse.rs":"8039c4bd5f1795dbb54e1e39da5988f1d2df6c86c42d8fd378094fc78074d31e","bulk.yaml":"17c2548388e0cd3a63473021a2f1e4ddedee082d79d9167cb31ad06a1890d3fc","src/date.rs":"a8159494372ba8ec8a3a0a5b69c9b185f3e7ab007f283188bf96a6f071151f20","src/duration.rs":"4939ae2d1c3056424de421c4b124d0fb387e058d9abc82a21b83b38d66a40753","src/lib.rs":"ad4dbed28080d9a64ef0100c96b20ff4988d9dde908f56e28ece7252f5932990","src/wrapper.rs":"badc640e77379a42b2fcb728337d60a764b7f00a1b5b1d50c7372ddc20941967","vagga.yaml":"8396fe1510117c1c7bc3e896b62290dcf2dd300346071297018b0077ad9e45ce"},"package":"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/Cargo.toml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "humantime" -version = "2.1.0" -authors = ["Paul Colomiets "] -description = " A parser and formatter for std::time::{Duration, SystemTime}\n" -homepage = "https://github.com/tailhook/humantime" -documentation = "https://docs.rs/humantime" -readme = "README.md" -keywords = ["time", "human", "human-friendly", "parser", "duration"] -categories = ["date-and-time"] -license = "MIT/Apache-2.0" -repository = "https://github.com/tailhook/humantime" - -[lib] -name = "humantime" -path = "src/lib.rs" -[dev-dependencies.chrono] -version = "0.4" - -[dev-dependencies.rand] -version = "0.6" - -[dev-dependencies.time] -version = "0.1" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-APACHE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-APACHE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-APACHE 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-MIT clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-MIT --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-MIT 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -Copyright (c) 2016 The humantime Developers - -Includes parts of http date with the following copyright: -Copyright (c) 2016 Pyfisch - -Includes portions of musl libc with the following copyright: -Copyright © 2005-2013 Rich Felker - - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/README.md 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -Human Time -========== - -**Status: stable** - -[Documentation](https://docs.rs/humantime) | -[Github](https://github.com/tailhook/humantime) | -[Crate](https://crates.io/crates/humantime) - - -Features: - -* Parses durations in free form like `15days 2min 2s` -* Formats durations in similar form `2years 2min 12us` -* Parses and formats timestamp in `rfc3339` format: `2018-01-01T12:53:00Z` -* Parses timestamps in a weaker format: `2018-01-01 12:53:00` - -Timestamp parsing/formatting is super-fast because format is basically -fixed. - -Here are some micro-benchmarks: - -``` -test result: ok. 0 passed; 0 failed; 26 ignored; 0 measured; 0 filtered out - - Running target/release/deps/datetime_format-8facb4ac832d9770 - -running 2 tests -test rfc3339_chrono ... bench: 737 ns/iter (+/- 37) -test rfc3339_humantime_seconds ... bench: 73 ns/iter (+/- 2) - -test result: ok. 0 passed; 0 failed; 0 ignored; 2 measured; 0 filtered out - - Running target/release/deps/datetime_parse-342628f877d7867c - -running 6 tests -test datetime_utc_parse_millis ... bench: 228 ns/iter (+/- 11) -test datetime_utc_parse_nanos ... bench: 236 ns/iter (+/- 10) -test datetime_utc_parse_seconds ... bench: 204 ns/iter (+/- 18) -test rfc3339_humantime_millis ... bench: 28 ns/iter (+/- 1) -test rfc3339_humantime_nanos ... 
bench: 36 ns/iter (+/- 2) -test rfc3339_humantime_seconds ... bench: 24 ns/iter (+/- 1) - -test result: ok. 0 passed; 0 failed; 0 ignored; 6 measured; 0 filtered out -``` - -See [humantime-serde] for serde integration (previous crate [serde-humantime] looks unmaintained). - -[serde-humantime]: https://docs.rs/serde-humantime/0.1.1/serde_humantime/ -[humantime-serde]: https://docs.rs/humantime-serde - -License -======= - -Licensed under either of - -* Apache License, Version 2.0, (./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) -* MIT license (./LICENSE-MIT or http://opensource.org/licenses/MIT) - -at your option. - -Contribution ------------- - -Unless you explicitly state otherwise, any contribution intentionally -submitted for inclusion in the work by you, as defined in the Apache-2.0 -license, shall be dual licensed as above, without any additional terms or -conditions. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/date.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/date.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/date.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/date.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,623 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::str; -use std::time::{SystemTime, Duration, UNIX_EPOCH}; - -#[cfg(target_os="cloudabi")] -mod max { - pub const SECONDS: u64 = ::std::u64::MAX / 1_000_000_000; - #[allow(unused)] - pub const TIMESTAMP: &'static str = "2554-07-21T23:34:33Z"; -} -#[cfg(all( - target_pointer_width="32", - not(target_os="cloudabi"), - not(target_os="windows"), - not(all(target_arch="wasm32", not(target_os="emscripten"))) -))] -mod max { - pub const SECONDS: u64 = ::std::i32::MAX as u64; - #[allow(unused)] - pub const TIMESTAMP: &'static str = "2038-01-19T03:14:07Z"; -} - -#[cfg(any( - target_pointer_width="64", - target_os="windows", - all(target_arch="wasm32", 
not(target_os="emscripten")), -))] -mod max { - pub const SECONDS: u64 = 253_402_300_800-1; // last second of year 9999 - #[allow(unused)] - pub const TIMESTAMP: &str = "9999-12-31T23:59:59Z"; -} - -/// Error parsing datetime (timestamp) -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum Error { - /// Numeric component is out of range - OutOfRange, - /// Bad character where digit is expected - InvalidDigit, - /// Other formatting errors - InvalidFormat, -} - -impl StdError for Error {} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::OutOfRange => write!(f, "numeric component is out of range"), - Error::InvalidDigit => write!(f, "bad character where digit is expected"), - Error::InvalidFormat => write!(f, "timestamp format is invalid"), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum Precision { - Smart, - Seconds, - Millis, - Micros, - Nanos, -} - -/// A wrapper type that allows you to Display a SystemTime -#[derive(Debug, Clone)] -pub struct Rfc3339Timestamp(SystemTime, Precision); - -#[inline] -fn two_digits(b1: u8, b2: u8) -> Result { - if b1 < b'0' || b2 < b'0' || b1 > b'9' || b2 > b'9' { - return Err(Error::InvalidDigit); - } - Ok(((b1 - b'0')*10 + (b2 - b'0')) as u64) -} - -/// Parse RFC3339 timestamp `2018-02-14T00:28:07Z` -/// -/// Supported feature: any precision of fractional -/// digits `2018-02-14T00:28:07.133Z`. -/// -/// Unsupported feature: localized timestamps. Only UTC is supported. -pub fn parse_rfc3339(s: &str) -> Result { - if s.len() < "2018-02-14T00:28:07Z".len() { - return Err(Error::InvalidFormat); - } - let b = s.as_bytes(); - if b[10] != b'T' || b[b.len()-1] != b'Z' { - return Err(Error::InvalidFormat); - } - parse_rfc3339_weak(s) -} - -/// Parse RFC3339-like timestamp `2018-02-14 00:28:07` -/// -/// Supported features: -/// -/// 1. Any precision of fractional digits `2018-02-14 00:28:07.133`. -/// 2. 
Supports timestamp with or without either of `T` or `Z` -/// 3. Anything valid for `parse_3339` is valid for this function -/// -/// Unsupported feature: localized timestamps. Only UTC is supported, even if -/// `Z` is not specified. -/// -/// This function is intended to use for parsing human input. Whereas -/// `parse_rfc3339` is for strings generated programmatically. -pub fn parse_rfc3339_weak(s: &str) -> Result { - if s.len() < "2018-02-14T00:28:07".len() { - return Err(Error::InvalidFormat); - } - let b = s.as_bytes(); // for careless slicing - if b[4] != b'-' || b[7] != b'-' || (b[10] != b'T' && b[10] != b' ') || - b[13] != b':' || b[16] != b':' - { - return Err(Error::InvalidFormat); - } - let year = two_digits(b[0], b[1])? * 100 + two_digits(b[2], b[3])?; - let month = two_digits(b[5], b[6])?; - let day = two_digits(b[8], b[9])?; - let hour = two_digits(b[11], b[12])?; - let minute = two_digits(b[14], b[15])?; - let mut second = two_digits(b[17], b[18])?; - - if year < 1970 || hour > 23 || minute > 59 || second > 60 { - return Err(Error::OutOfRange); - } - // TODO(tailhook) should we check that leaps second is only on midnight ? 
- if second == 60 { - second = 59 - }; - let leap_years = ((year - 1) - 1968) / 4 - ((year - 1) - 1900) / 100 + - ((year - 1) - 1600) / 400; - let leap = is_leap_year(year); - let (mut ydays, mdays) = match month { - 1 => (0, 31), - 2 if leap => (31, 29), - 2 => (31, 28), - 3 => (59, 31), - 4 => (90, 30), - 5 => (120, 31), - 6 => (151, 30), - 7 => (181, 31), - 8 => (212, 31), - 9 => (243, 30), - 10 => (273, 31), - 11 => (304, 30), - 12 => (334, 31), - _ => return Err(Error::OutOfRange), - }; - if day > mdays || day == 0 { - return Err(Error::OutOfRange); - } - ydays += day - 1; - if leap && month > 2 { - ydays += 1; - } - let days = (year - 1970) * 365 + leap_years + ydays; - - let time = second + minute * 60 + hour * 3600; - - let mut nanos = 0; - let mut mult = 100_000_000; - if b.get(19) == Some(&b'.') { - for idx in 20..b.len() { - if b[idx] == b'Z' { - if idx == b.len()-1 { - break; - } else { - return Err(Error::InvalidDigit); - } - } - if b[idx] < b'0' || b[idx] > b'9' { - return Err(Error::InvalidDigit); - } - nanos += mult * (b[idx] - b'0') as u32; - mult /= 10; - } - } else if b.len() != 19 && (b.len() > 20 || b[19] != b'Z') { - return Err(Error::InvalidFormat); - } - - let total_seconds = time + days * 86400; - if total_seconds > max::SECONDS { - return Err(Error::OutOfRange); - } - - Ok(UNIX_EPOCH + Duration::new(total_seconds, nanos)) -} - -fn is_leap_year(y: u64) -> bool { - y % 4 == 0 && (y % 100 != 0 || y % 400 == 0) -} - -/// Format an RFC3339 timestamp `2018-02-14T00:28:07Z` -/// -/// This function formats timestamp with smart precision: i.e. if it has no -/// fractional seconds, they aren't written at all. And up to nine digits if -/// they are. -/// -/// The value is always UTC and ignores system timezone. 
-pub fn format_rfc3339(system_time: SystemTime) -> Rfc3339Timestamp { - Rfc3339Timestamp(system_time, Precision::Smart) -} - -/// Format an RFC3339 timestamp `2018-02-14T00:28:07Z` -/// -/// This format always shows timestamp without fractional seconds. -/// -/// The value is always UTC and ignores system timezone. -pub fn format_rfc3339_seconds(system_time: SystemTime) -> Rfc3339Timestamp { - Rfc3339Timestamp(system_time, Precision::Seconds) -} - -/// Format an RFC3339 timestamp `2018-02-14T00:28:07.000Z` -/// -/// This format always shows milliseconds even if millisecond value is zero. -/// -/// The value is always UTC and ignores system timezone. -pub fn format_rfc3339_millis(system_time: SystemTime) -> Rfc3339Timestamp { - Rfc3339Timestamp(system_time, Precision::Millis) -} - -/// Format an RFC3339 timestamp `2018-02-14T00:28:07.000000Z` -/// -/// This format always shows microseconds even if microsecond value is zero. -/// -/// The value is always UTC and ignores system timezone. -pub fn format_rfc3339_micros(system_time: SystemTime) -> Rfc3339Timestamp { - Rfc3339Timestamp(system_time, Precision::Micros) -} - -/// Format an RFC3339 timestamp `2018-02-14T00:28:07.000000000Z` -/// -/// This format always shows nanoseconds even if nanosecond value is zero. -/// -/// The value is always UTC and ignores system timezone. -pub fn format_rfc3339_nanos(system_time: SystemTime) -> Rfc3339Timestamp { - Rfc3339Timestamp(system_time, Precision::Nanos) -} - -impl Rfc3339Timestamp { - /// Returns a reference to the [`SystemTime`][] that is being formatted. 
- pub fn get_ref(&self) -> &SystemTime { - &self.0 - } -} - -impl fmt::Display for Rfc3339Timestamp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Precision::*; - - let dur = self.0.duration_since(UNIX_EPOCH) - .expect("all times should be after the epoch"); - let secs_since_epoch = dur.as_secs(); - let nanos = dur.subsec_nanos(); - - if secs_since_epoch >= 253_402_300_800 { // year 9999 - return Err(fmt::Error); - } - - /* 2000-03-01 (mod 400 year, immediately after feb29 */ - const LEAPOCH: i64 = 11017; - const DAYS_PER_400Y: i64 = 365*400 + 97; - const DAYS_PER_100Y: i64 = 365*100 + 24; - const DAYS_PER_4Y: i64 = 365*4 + 1; - - let days = (secs_since_epoch / 86400) as i64 - LEAPOCH; - let secs_of_day = secs_since_epoch % 86400; - - let mut qc_cycles = days / DAYS_PER_400Y; - let mut remdays = days % DAYS_PER_400Y; - - if remdays < 0 { - remdays += DAYS_PER_400Y; - qc_cycles -= 1; - } - - let mut c_cycles = remdays / DAYS_PER_100Y; - if c_cycles == 4 { c_cycles -= 1; } - remdays -= c_cycles * DAYS_PER_100Y; - - let mut q_cycles = remdays / DAYS_PER_4Y; - if q_cycles == 25 { q_cycles -= 1; } - remdays -= q_cycles * DAYS_PER_4Y; - - let mut remyears = remdays / 365; - if remyears == 4 { remyears -= 1; } - remdays -= remyears * 365; - - let mut year = 2000 + - remyears + 4*q_cycles + 100*c_cycles + 400*qc_cycles; - - let months = [31,30,31,30,31,31,30,31,30,31,31,29]; - let mut mon = 0; - for mon_len in months.iter() { - mon += 1; - if remdays < *mon_len { - break; - } - remdays -= *mon_len; - } - let mday = remdays+1; - let mon = if mon + 2 > 12 { - year += 1; - mon - 10 - } else { - mon + 2 - }; - - let mut buf: [u8; 30] = [ - // Too long to write as: b"0000-00-00T00:00:00.000000000Z" - b'0', b'0', b'0', b'0', b'-', b'0', b'0', b'-', b'0', b'0', b'T', - b'0', b'0', b':', b'0', b'0', b':', b'0', b'0', - b'.', b'0', b'0', b'0', b'0', b'0', b'0', b'0', b'0', b'0', b'Z', - ]; - buf[0] = b'0' + (year / 1000) as u8; - buf[1] = b'0' + (year / 100 
% 10) as u8; - buf[2] = b'0' + (year / 10 % 10) as u8; - buf[3] = b'0' + (year % 10) as u8; - buf[5] = b'0' + (mon / 10) as u8; - buf[6] = b'0' + (mon % 10) as u8; - buf[8] = b'0' + (mday / 10) as u8; - buf[9] = b'0' + (mday % 10) as u8; - buf[11] = b'0' + (secs_of_day / 3600 / 10) as u8; - buf[12] = b'0' + (secs_of_day / 3600 % 10) as u8; - buf[14] = b'0' + (secs_of_day / 60 / 10 % 6) as u8; - buf[15] = b'0' + (secs_of_day / 60 % 10) as u8; - buf[17] = b'0' + (secs_of_day / 10 % 6) as u8; - buf[18] = b'0' + (secs_of_day % 10) as u8; - - let offset = if self.1 == Seconds || nanos == 0 && self.1 == Smart { - buf[19] = b'Z'; - 19 - } else if self.1 == Millis { - buf[20] = b'0' + (nanos / 100_000_000) as u8; - buf[21] = b'0' + (nanos / 10_000_000 % 10) as u8; - buf[22] = b'0' + (nanos / 1_000_000 % 10) as u8; - buf[23] = b'Z'; - 23 - } else if self.1 == Micros { - buf[20] = b'0' + (nanos / 100_000_000) as u8; - buf[21] = b'0' + (nanos / 10_000_000 % 10) as u8; - buf[22] = b'0' + (nanos / 1_000_000 % 10) as u8; - buf[23] = b'0' + (nanos / 100_000 % 10) as u8; - buf[24] = b'0' + (nanos / 10_000 % 10) as u8; - buf[25] = b'0' + (nanos / 1_000 % 10) as u8; - buf[26] = b'Z'; - 26 - } else { - buf[20] = b'0' + (nanos / 100_000_000) as u8; - buf[21] = b'0' + (nanos / 10_000_000 % 10) as u8; - buf[22] = b'0' + (nanos / 1_000_000 % 10) as u8; - buf[23] = b'0' + (nanos / 100_000 % 10) as u8; - buf[24] = b'0' + (nanos / 10_000 % 10) as u8; - buf[25] = b'0' + (nanos / 1_000 % 10) as u8; - buf[26] = b'0' + (nanos / 100 % 10) as u8; - buf[27] = b'0' + (nanos / 10 % 10) as u8; - buf[28] = b'0' + (nanos / 1 % 10) as u8; - // 29th is 'Z' - 29 - }; - - // we know our chars are all ascii - f.write_str(str::from_utf8(&buf[..=offset]).expect("Conversion to utf8 failed")) - } -} - -#[cfg(test)] -mod test { - use std::str::from_utf8; - use std::time::{UNIX_EPOCH, SystemTime, Duration}; - - use rand::Rng; - - use super::{parse_rfc3339, parse_rfc3339_weak, format_rfc3339}; - use 
super::{format_rfc3339_millis, format_rfc3339_micros}; - use super::{format_rfc3339_nanos}; - use super::max; - - fn from_sec(sec: u64) -> (String, SystemTime) { - let s = time::at_utc(time::Timespec { sec: sec as i64, nsec: 0 }) - .rfc3339().to_string(); - let time = UNIX_EPOCH + Duration::new(sec, 0); - (s, time) - } - - #[test] - #[cfg(all(target_pointer_width="32", target_os="linux"))] - fn year_after_2038_fails_gracefully() { - // next second - assert_eq!(parse_rfc3339("2038-01-19T03:14:08Z").unwrap_err(), - super::Error::OutOfRange); - assert_eq!(parse_rfc3339("9999-12-31T23:59:59Z").unwrap_err(), - super::Error::OutOfRange); - } - - #[test] - fn smoke_tests_parse() { - assert_eq!(parse_rfc3339("1970-01-01T00:00:00Z").unwrap(), - UNIX_EPOCH + Duration::new(0, 0)); - assert_eq!(parse_rfc3339("1970-01-01T00:00:01Z").unwrap(), - UNIX_EPOCH + Duration::new(1, 0)); - assert_eq!(parse_rfc3339("2018-02-13T23:08:32Z").unwrap(), - UNIX_EPOCH + Duration::new(1_518_563_312, 0)); - assert_eq!(parse_rfc3339("2012-01-01T00:00:00Z").unwrap(), - UNIX_EPOCH + Duration::new(1_325_376_000, 0)); - } - - #[test] - fn smoke_tests_format() { - assert_eq!( - format_rfc3339(UNIX_EPOCH + Duration::new(0, 0)).to_string(), - "1970-01-01T00:00:00Z"); - assert_eq!( - format_rfc3339(UNIX_EPOCH + Duration::new(1, 0)).to_string(), - "1970-01-01T00:00:01Z"); - assert_eq!( - format_rfc3339(UNIX_EPOCH + Duration::new(1_518_563_312, 0)).to_string(), - "2018-02-13T23:08:32Z"); - assert_eq!( - format_rfc3339(UNIX_EPOCH + Duration::new(1_325_376_000, 0)).to_string(), - "2012-01-01T00:00:00Z"); - } - - #[test] - fn smoke_tests_format_millis() { - assert_eq!( - format_rfc3339_millis(UNIX_EPOCH + - Duration::new(0, 0)).to_string(), - "1970-01-01T00:00:00.000Z"); - assert_eq!( - format_rfc3339_millis(UNIX_EPOCH + - Duration::new(1_518_563_312, 123_000_000)).to_string(), - "2018-02-13T23:08:32.123Z"); - } - - #[test] - fn smoke_tests_format_micros() { - assert_eq!( - format_rfc3339_micros(UNIX_EPOCH + - 
Duration::new(0, 0)).to_string(), - "1970-01-01T00:00:00.000000Z"); - assert_eq!( - format_rfc3339_micros(UNIX_EPOCH + - Duration::new(1_518_563_312, 123_000_000)).to_string(), - "2018-02-13T23:08:32.123000Z"); - assert_eq!( - format_rfc3339_micros(UNIX_EPOCH + - Duration::new(1_518_563_312, 456_123_000)).to_string(), - "2018-02-13T23:08:32.456123Z"); - } - - #[test] - fn smoke_tests_format_nanos() { - assert_eq!( - format_rfc3339_nanos(UNIX_EPOCH + - Duration::new(0, 0)).to_string(), - "1970-01-01T00:00:00.000000000Z"); - assert_eq!( - format_rfc3339_nanos(UNIX_EPOCH + - Duration::new(1_518_563_312, 123_000_000)).to_string(), - "2018-02-13T23:08:32.123000000Z"); - assert_eq!( - format_rfc3339_nanos(UNIX_EPOCH + - Duration::new(1_518_563_312, 789_456_123)).to_string(), - "2018-02-13T23:08:32.789456123Z"); - } - - #[test] - fn upper_bound() { - let max = UNIX_EPOCH + Duration::new(max::SECONDS, 0); - assert_eq!(parse_rfc3339(&max::TIMESTAMP).unwrap(), max); - assert_eq!(format_rfc3339(max).to_string(), max::TIMESTAMP); - } - - #[test] - fn leap_second() { - assert_eq!(parse_rfc3339("2016-12-31T23:59:60Z").unwrap(), - UNIX_EPOCH + Duration::new(1_483_228_799, 0)); - } - - #[test] - fn first_731_days() { - let year_start = 0; // 1970 - for day in 0..= 365 * 2 { // scan leap year and non-leap year - let (s, time) = from_sec(year_start + day * 86400); - assert_eq!(parse_rfc3339(&s).unwrap(), time); - assert_eq!(format_rfc3339(time).to_string(), s); - } - } - - #[test] - fn the_731_consecutive_days() { - let year_start = 1_325_376_000; // 2012 - for day in 0..= 365 * 2 { // scan leap year and non-leap year - let (s, time) = from_sec(year_start + day * 86400); - assert_eq!(parse_rfc3339(&s).unwrap(), time); - assert_eq!(format_rfc3339(time).to_string(), s); - } - } - - #[test] - fn all_86400_seconds() { - let day_start = 1_325_376_000; - for second in 0..86400 { // scan leap year and non-leap year - let (s, time) = from_sec(day_start + second); - 
assert_eq!(parse_rfc3339(&s).unwrap(), time); - assert_eq!(format_rfc3339(time).to_string(), s); - } - } - - #[test] - fn random_past() { - let upper = SystemTime::now().duration_since(UNIX_EPOCH).unwrap() - .as_secs(); - for _ in 0..10000 { - let sec = rand::thread_rng().gen_range(0, upper); - let (s, time) = from_sec(sec); - assert_eq!(parse_rfc3339(&s).unwrap(), time); - assert_eq!(format_rfc3339(time).to_string(), s); - } - } - - #[test] - fn random_wide_range() { - for _ in 0..100_000 { - let sec = rand::thread_rng().gen_range(0, max::SECONDS); - let (s, time) = from_sec(sec); - assert_eq!(parse_rfc3339(&s).unwrap(), time); - assert_eq!(format_rfc3339(time).to_string(), s); - } - } - - #[test] - fn milliseconds() { - assert_eq!(parse_rfc3339("1970-01-01T00:00:00.123Z").unwrap(), - UNIX_EPOCH + Duration::new(0, 123_000_000)); - assert_eq!(format_rfc3339(UNIX_EPOCH + Duration::new(0, 123_000_000)) - .to_string(), "1970-01-01T00:00:00.123000000Z"); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn zero_month() { - parse_rfc3339("1970-00-01T00:00:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_month() { - parse_rfc3339("1970-32-01T00:00:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn zero_day() { - parse_rfc3339("1970-01-00T00:00:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_day() { - parse_rfc3339("1970-12-35T00:00:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_day2() { - parse_rfc3339("1970-02-30T00:00:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_second() { - parse_rfc3339("1970-12-30T00:00:78Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_minute() { - parse_rfc3339("1970-12-30T00:78:00Z").unwrap(); - } - - #[test] - #[should_panic(expected="OutOfRange")] - fn big_hour() { - parse_rfc3339("1970-12-30T24:00:00Z").unwrap(); - } - - #[test] - fn 
break_data() { - for pos in 0.."2016-12-31T23:59:60Z".len() { - let mut s = b"2016-12-31T23:59:60Z".to_vec(); - s[pos] = b'x'; - parse_rfc3339(from_utf8(&s).unwrap()).unwrap_err(); - } - } - - #[test] - fn weak_smoke_tests() { - assert_eq!(parse_rfc3339_weak("1970-01-01 00:00:00").unwrap(), - UNIX_EPOCH + Duration::new(0, 0)); - parse_rfc3339("1970-01-01 00:00:00").unwrap_err(); - - assert_eq!(parse_rfc3339_weak("1970-01-01 00:00:00.000123").unwrap(), - UNIX_EPOCH + Duration::new(0, 123_000)); - parse_rfc3339("1970-01-01 00:00:00.000123").unwrap_err(); - - assert_eq!(parse_rfc3339_weak("1970-01-01T00:00:00.000123").unwrap(), - UNIX_EPOCH + Duration::new(0, 123_000)); - parse_rfc3339("1970-01-01T00:00:00.000123").unwrap_err(); - - assert_eq!(parse_rfc3339_weak("1970-01-01 00:00:00.000123Z").unwrap(), - UNIX_EPOCH + Duration::new(0, 123_000)); - parse_rfc3339("1970-01-01 00:00:00.000123Z").unwrap_err(); - - assert_eq!(parse_rfc3339_weak("1970-01-01 00:00:00Z").unwrap(), - UNIX_EPOCH + Duration::new(0, 0)); - parse_rfc3339("1970-01-01 00:00:00Z").unwrap_err(); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/duration.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/duration.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/duration.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/duration.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,456 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::str::Chars; -use std::time::Duration; - -/// Error parsing human-friendly duration -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - /// Invalid character during parsing - /// - /// More specifically anything that is not alphanumeric is prohibited - /// - /// The field is an byte offset of the character in the string. 
- InvalidCharacter(usize), - /// Non-numeric value where number is expected - /// - /// This usually means that either time unit is broken into words, - /// e.g. `m sec` instead of `msec`, or just number is omitted, - /// for example `2 hours min` instead of `2 hours 1 min` - /// - /// The field is an byte offset of the errorneous character - /// in the string. - NumberExpected(usize), - /// Unit in the number is not one of allowed units - /// - /// See documentation of `parse_duration` for the list of supported - /// time units. - /// - /// The two fields are start and end (exclusive) of the slice from - /// the original string, containing errorneous value - UnknownUnit { - /// Start of the invalid unit inside the original string - start: usize, - /// End of the invalid unit inside the original string - end: usize, - /// The unit verbatim - unit: String, - /// A number associated with the unit - value: u64, - }, - /// The numeric value is too large - /// - /// Usually this means value is too large to be useful. If user writes - /// data in subsecond units, then the maximum is about 3k years. When - /// using seconds, or larger units, the limit is even larger. - NumberOverflow, - /// The value was an empty string (or consists only whitespace) - Empty, -} - -impl StdError for Error {} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::InvalidCharacter(offset) => write!(f, "invalid character at {}", offset), - Error::NumberExpected(offset) => write!(f, "expected number at {}", offset), - Error::UnknownUnit { unit, value, .. } if &unit == &"" => { - write!(f, - "time unit needed, for example {0}sec or {0}ms", - value, - ) - } - Error::UnknownUnit { unit, .. 
} => { - write!( - f, - "unknown time unit {:?}, \ - supported units: ns, us, ms, sec, min, hours, days, \ - weeks, months, years (and few variations)", - unit - ) - } - Error::NumberOverflow => write!(f, "number is too large"), - Error::Empty => write!(f, "value was empty"), - } - } -} - -/// A wrapper type that allows you to Display a Duration -#[derive(Debug, Clone)] -pub struct FormattedDuration(Duration); - -trait OverflowOp: Sized { - fn mul(self, other: Self) -> Result; - fn add(self, other: Self) -> Result; -} - -impl OverflowOp for u64 { - fn mul(self, other: Self) -> Result { - self.checked_mul(other).ok_or(Error::NumberOverflow) - } - fn add(self, other: Self) -> Result { - self.checked_add(other).ok_or(Error::NumberOverflow) - } -} - -struct Parser<'a> { - iter: Chars<'a>, - src: &'a str, - current: (u64, u64), -} - -impl<'a> Parser<'a> { - fn off(&self) -> usize { - self.src.len() - self.iter.as_str().len() - } - - fn parse_first_char(&mut self) -> Result, Error> { - let off = self.off(); - for c in self.iter.by_ref() { - match c { - '0'..='9' => { - return Ok(Some(c as u64 - '0' as u64)); - } - c if c.is_whitespace() => continue, - _ => { - return Err(Error::NumberExpected(off)); - } - } - } - Ok(None) - } - fn parse_unit(&mut self, n: u64, start: usize, end: usize) - -> Result<(), Error> - { - let (mut sec, nsec) = match &self.src[start..end] { - "nanos" | "nsec" | "ns" => (0u64, n), - "usec" | "us" => (0u64, n.mul(1000)?), - "millis" | "msec" | "ms" => (0u64, n.mul(1_000_000)?), - "seconds" | "second" | "secs" | "sec" | "s" => (n, 0), - "minutes" | "minute" | "min" | "mins" | "m" - => (n.mul(60)?, 0), - "hours" | "hour" | "hr" | "hrs" | "h" => (n.mul(3600)?, 0), - "days" | "day" | "d" => (n.mul(86400)?, 0), - "weeks" | "week" | "w" => (n.mul(86400*7)?, 0), - "months" | "month" | "M" => (n.mul(2_630_016)?, 0), // 30.44d - "years" | "year" | "y" => (n.mul(31_557_600)?, 0), // 365.25d - _ => { - return Err(Error::UnknownUnit { - start, end, - unit: 
self.src[start..end].to_string(), - value: n, - }); - } - }; - let mut nsec = self.current.1.add(nsec)?; - if nsec > 1_000_000_000 { - sec = sec.add(nsec / 1_000_000_000)?; - nsec %= 1_000_000_000; - } - sec = self.current.0.add(sec)?; - self.current = (sec, nsec); - Ok(()) - } - - fn parse(mut self) -> Result { - let mut n = self.parse_first_char()?.ok_or(Error::Empty)?; - 'outer: loop { - let mut off = self.off(); - while let Some(c) = self.iter.next() { - match c { - '0'..='9' => { - n = n.checked_mul(10) - .and_then(|x| x.checked_add(c as u64 - '0' as u64)) - .ok_or(Error::NumberOverflow)?; - } - c if c.is_whitespace() => {} - 'a'..='z' | 'A'..='Z' => { - break; - } - _ => { - return Err(Error::InvalidCharacter(off)); - } - } - off = self.off(); - } - let start = off; - let mut off = self.off(); - while let Some(c) = self.iter.next() { - match c { - '0'..='9' => { - self.parse_unit(n, start, off)?; - n = c as u64 - '0' as u64; - continue 'outer; - } - c if c.is_whitespace() => break, - 'a'..='z' | 'A'..='Z' => {} - _ => { - return Err(Error::InvalidCharacter(off)); - } - } - off = self.off(); - } - self.parse_unit(n, start, off)?; - n = match self.parse_first_char()? { - Some(n) => n, - None => return Ok( - Duration::new(self.current.0, self.current.1 as u32)), - }; - } - } - -} - -/// Parse duration object `1hour 12min 5s` -/// -/// The duration object is a concatenation of time spans. Where each time -/// span is an integer number and a suffix. 
Supported suffixes: -/// -/// * `nsec`, `ns` -- nanoseconds -/// * `usec`, `us` -- microseconds -/// * `msec`, `ms` -- milliseconds -/// * `seconds`, `second`, `sec`, `s` -/// * `minutes`, `minute`, `min`, `m` -/// * `hours`, `hour`, `hr`, `h` -/// * `days`, `day`, `d` -/// * `weeks`, `week`, `w` -/// * `months`, `month`, `M` -- defined as 30.44 days -/// * `years`, `year`, `y` -- defined as 365.25 days -/// -/// # Examples -/// -/// ``` -/// use std::time::Duration; -/// use humantime::parse_duration; -/// -/// assert_eq!(parse_duration("2h 37min"), Ok(Duration::new(9420, 0))); -/// assert_eq!(parse_duration("32ms"), Ok(Duration::new(0, 32_000_000))); -/// ``` -pub fn parse_duration(s: &str) -> Result { - Parser { - iter: s.chars(), - src: s, - current: (0, 0), - }.parse() -} - -/// Formats duration into a human-readable string -/// -/// Note: this format is guaranteed to have same value when using -/// parse_duration, but we can change some details of the exact composition -/// of the value. 
-/// -/// # Examples -/// -/// ``` -/// use std::time::Duration; -/// use humantime::format_duration; -/// -/// let val1 = Duration::new(9420, 0); -/// assert_eq!(format_duration(val1).to_string(), "2h 37m"); -/// let val2 = Duration::new(0, 32_000_000); -/// assert_eq!(format_duration(val2).to_string(), "32ms"); -/// ``` -pub fn format_duration(val: Duration) -> FormattedDuration { - FormattedDuration(val) -} - -fn item_plural(f: &mut fmt::Formatter, started: &mut bool, - name: &str, value: u64) - -> fmt::Result -{ - if value > 0 { - if *started { - f.write_str(" ")?; - } - write!(f, "{}{}", value, name)?; - if value > 1 { - f.write_str("s")?; - } - *started = true; - } - Ok(()) -} -fn item(f: &mut fmt::Formatter, started: &mut bool, name: &str, value: u32) - -> fmt::Result -{ - if value > 0 { - if *started { - f.write_str(" ")?; - } - write!(f, "{}{}", value, name)?; - *started = true; - } - Ok(()) -} - -impl FormattedDuration { - /// Returns a reference to the [`Duration`][] that is being formatted. 
- pub fn get_ref(&self) -> &Duration { - &self.0 - } -} - -impl fmt::Display for FormattedDuration { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let secs = self.0.as_secs(); - let nanos = self.0.subsec_nanos(); - - if secs == 0 && nanos == 0 { - f.write_str("0s")?; - return Ok(()); - } - - let years = secs / 31_557_600; // 365.25d - let ydays = secs % 31_557_600; - let months = ydays / 2_630_016; // 30.44d - let mdays = ydays % 2_630_016; - let days = mdays / 86400; - let day_secs = mdays % 86400; - let hours = day_secs / 3600; - let minutes = day_secs % 3600 / 60; - let seconds = day_secs % 60; - - let millis = nanos / 1_000_000; - let micros = nanos / 1000 % 1000; - let nanosec = nanos % 1000; - - let ref mut started = false; - item_plural(f, started, "year", years)?; - item_plural(f, started, "month", months)?; - item_plural(f, started, "day", days)?; - item(f, started, "h", hours as u32)?; - item(f, started, "m", minutes as u32)?; - item(f, started, "s", seconds as u32)?; - item(f, started, "ms", millis)?; - item(f, started, "us", micros)?; - item(f, started, "ns", nanosec)?; - Ok(()) - } -} - -#[cfg(test)] -mod test { - use std::time::Duration; - - use rand::Rng; - - use super::{parse_duration, format_duration}; - use super::Error; - - #[test] - #[allow(clippy::cognitive_complexity)] - fn test_units() { - assert_eq!(parse_duration("17nsec"), Ok(Duration::new(0, 17))); - assert_eq!(parse_duration("17nanos"), Ok(Duration::new(0, 17))); - assert_eq!(parse_duration("33ns"), Ok(Duration::new(0, 33))); - assert_eq!(parse_duration("3usec"), Ok(Duration::new(0, 3000))); - assert_eq!(parse_duration("78us"), Ok(Duration::new(0, 78000))); - assert_eq!(parse_duration("31msec"), Ok(Duration::new(0, 31_000_000))); - assert_eq!(parse_duration("31millis"), Ok(Duration::new(0, 31_000_000))); - assert_eq!(parse_duration("6ms"), Ok(Duration::new(0, 6_000_000))); - assert_eq!(parse_duration("3000s"), Ok(Duration::new(3000, 0))); - 
assert_eq!(parse_duration("300sec"), Ok(Duration::new(300, 0))); - assert_eq!(parse_duration("300secs"), Ok(Duration::new(300, 0))); - assert_eq!(parse_duration("50seconds"), Ok(Duration::new(50, 0))); - assert_eq!(parse_duration("1second"), Ok(Duration::new(1, 0))); - assert_eq!(parse_duration("100m"), Ok(Duration::new(6000, 0))); - assert_eq!(parse_duration("12min"), Ok(Duration::new(720, 0))); - assert_eq!(parse_duration("12mins"), Ok(Duration::new(720, 0))); - assert_eq!(parse_duration("1minute"), Ok(Duration::new(60, 0))); - assert_eq!(parse_duration("7minutes"), Ok(Duration::new(420, 0))); - assert_eq!(parse_duration("2h"), Ok(Duration::new(7200, 0))); - assert_eq!(parse_duration("7hr"), Ok(Duration::new(25200, 0))); - assert_eq!(parse_duration("7hrs"), Ok(Duration::new(25200, 0))); - assert_eq!(parse_duration("1hour"), Ok(Duration::new(3600, 0))); - assert_eq!(parse_duration("24hours"), Ok(Duration::new(86400, 0))); - assert_eq!(parse_duration("1day"), Ok(Duration::new(86400, 0))); - assert_eq!(parse_duration("2days"), Ok(Duration::new(172_800, 0))); - assert_eq!(parse_duration("365d"), Ok(Duration::new(31_536_000, 0))); - assert_eq!(parse_duration("1week"), Ok(Duration::new(604_800, 0))); - assert_eq!(parse_duration("7weeks"), Ok(Duration::new(4_233_600, 0))); - assert_eq!(parse_duration("52w"), Ok(Duration::new(31_449_600, 0))); - assert_eq!(parse_duration("1month"), Ok(Duration::new(2_630_016, 0))); - assert_eq!(parse_duration("3months"), Ok(Duration::new(3*2_630_016, 0))); - assert_eq!(parse_duration("12M"), Ok(Duration::new(31_560_192, 0))); - assert_eq!(parse_duration("1year"), Ok(Duration::new(31_557_600, 0))); - assert_eq!(parse_duration("7years"), Ok(Duration::new(7*31_557_600, 0))); - assert_eq!(parse_duration("17y"), Ok(Duration::new(536_479_200, 0))); - } - - #[test] - fn test_combo() { - assert_eq!(parse_duration("20 min 17 nsec "), Ok(Duration::new(1200, 17))); - assert_eq!(parse_duration("2h 15m"), Ok(Duration::new(8100, 0))); - } - - #[test] 
- fn all_86400_seconds() { - for second in 0..86400 { // scan leap year and non-leap year - let d = Duration::new(second, 0); - assert_eq!(d, - parse_duration(&format_duration(d).to_string()).unwrap()); - } - } - - #[test] - fn random_second() { - for _ in 0..10000 { - let sec = rand::thread_rng().gen_range(0, 253_370_764_800); - let d = Duration::new(sec, 0); - assert_eq!(d, - parse_duration(&format_duration(d).to_string()).unwrap()); - } - } - - #[test] - fn random_any() { - for _ in 0..10000 { - let sec = rand::thread_rng().gen_range(0, 253_370_764_800); - let nanos = rand::thread_rng().gen_range(0, 1_000_000_000); - let d = Duration::new(sec, nanos); - assert_eq!(d, - parse_duration(&format_duration(d).to_string()).unwrap()); - } - } - - #[test] - fn test_overlow() { - // Overflow on subseconds is earlier because of how we do conversion - // we could fix it, but I don't see any good reason for this - assert_eq!(parse_duration("100000000000000000000ns"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("100000000000000000us"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("100000000000000ms"), - Err(Error::NumberOverflow)); - - assert_eq!(parse_duration("100000000000000000000s"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("10000000000000000000m"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("1000000000000000000h"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("100000000000000000d"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("10000000000000000w"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("1000000000000000M"), - Err(Error::NumberOverflow)); - assert_eq!(parse_duration("10000000000000y"), - Err(Error::NumberOverflow)); - } - - #[test] - fn test_nice_error_message() { - assert_eq!(parse_duration("123").unwrap_err().to_string(), - "time unit needed, for example 123sec or 123ms"); - assert_eq!(parse_duration("10 months 1").unwrap_err().to_string(), - "time unit 
needed, for example 1sec or 1ms"); - assert_eq!(parse_duration("10nights").unwrap_err().to_string(), - "unknown time unit \"nights\", supported units: \ - ns, us, ms, sec, min, hours, days, weeks, months, \ - years (and few variations)"); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/lib.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -//! Human-friendly time parser and formatter -//! -//! Features: -//! -//! * Parses durations in free form like `15days 2min 2s` -//! * Formats durations in similar form `2years 2min 12us` -//! * Parses and formats timestamp in `rfc3339` format: `2018-01-01T12:53:00Z` -//! * Parses timestamps in a weaker format: `2018-01-01 12:53:00` -//! -//! Timestamp parsing/formatting is super-fast because format is basically -//! fixed. -//! -//! See [humantime-serde] for serde integration (previous crate [serde-humantime] looks unmaintained). -//! -//! [serde-humantime]: https://docs.rs/serde-humantime/0.1.1/serde_humantime/ -//! 
[humantime-serde]: https://docs.rs/humantime-serde - -#![forbid(unsafe_code)] -#![warn(missing_debug_implementations)] -#![warn(missing_docs)] - -mod duration; -mod wrapper; -mod date; - -pub use self::duration::{parse_duration, Error as DurationError}; -pub use self::duration::{format_duration, FormattedDuration}; -pub use self::wrapper::{Duration, Timestamp}; -pub use self::date::{parse_rfc3339, parse_rfc3339_weak, Error as TimestampError}; -pub use self::date::{ - format_rfc3339, format_rfc3339_micros, format_rfc3339_millis, format_rfc3339_nanos, - format_rfc3339_seconds, -}; -pub use self::date::{Rfc3339Timestamp}; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/wrapper.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/wrapper.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/src/wrapper.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/src/wrapper.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -use std::str::FromStr; -use std::ops::Deref; -use std::fmt; -use std::time::{Duration as StdDuration, SystemTime}; - -use crate::duration::{self, parse_duration, format_duration}; -use crate::date::{self, parse_rfc3339_weak, format_rfc3339}; - -/// A wrapper for duration that has `FromStr` implementation -/// -/// This is useful if you want to use it somewhere where `FromStr` is -/// expected. -/// -/// See `parse_duration` for the description of the format. -/// -/// # Example -/// -/// ``` -/// use std::time::Duration; -/// let x: Duration; -/// x = "12h 5min 2ns".parse::().unwrap().into(); -/// assert_eq!(x, Duration::new(12*3600 + 5*60, 2)) -/// ``` -/// -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -pub struct Duration(StdDuration); - -/// A wrapper for SystemTime that has `FromStr` implementation -/// -/// This is useful if you want to use it somewhere where `FromStr` is -/// expected. 
-/// -/// See `parse_rfc3339_weak` for the description of the format. The "weak" -/// format is used as it's more pemissive for human input as this is the -/// expected use of the type (e.g. command-line parsing). -/// -/// # Example -/// -/// ``` -/// use std::time::SystemTime; -/// let x: SystemTime; -/// x = "2018-02-16T00:31:37Z".parse::().unwrap().into(); -/// assert_eq!(humantime::format_rfc3339(x).to_string(), "2018-02-16T00:31:37Z"); -/// ``` -/// -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct Timestamp(SystemTime); - -impl AsRef for Duration { - fn as_ref(&self) -> &StdDuration { &self.0 } -} - -impl Deref for Duration { - type Target = StdDuration; - fn deref(&self) -> &StdDuration { &self.0 } -} - -impl Into for Duration { - fn into(self) -> StdDuration { self.0 } -} - -impl From for Duration { - fn from(dur: StdDuration) -> Duration { Duration(dur) } -} - -impl FromStr for Duration { - type Err = duration::Error; - fn from_str(s: &str) -> Result { - parse_duration(s).map(Duration) - } -} - -impl fmt::Display for Duration { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - format_duration(self.0).fmt(f) - } -} - -impl AsRef for Timestamp { - fn as_ref(&self) -> &SystemTime { &self.0 } -} - -impl Deref for Timestamp { - type Target = SystemTime; - fn deref(&self) -> &SystemTime { &self.0 } -} - -impl Into for Timestamp { - fn into(self) -> SystemTime { self.0 } -} - -impl From for Timestamp { - fn from(dur: SystemTime) -> Timestamp { Timestamp(dur) } -} - -impl FromStr for Timestamp { - type Err = date::Error; - fn from_str(s: &str) -> Result { - parse_rfc3339_weak(s).map(Timestamp) - } -} - -impl fmt::Display for Timestamp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - format_rfc3339(self.0).fmt(f) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/vagga.yaml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/vagga.yaml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/humantime/vagga.yaml 
2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/humantime/vagga.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -commands: - - cargo: !Command - description: Run any cargo command - container: ubuntu - run: [cargo] - - make: !Command - description: Build the library - container: ubuntu - run: [cargo, build] - - test64: !Command - description: Test the 64bit library - container: ubuntu - environ: { RUST_BACKTRACE: 1 } - run: [cargo, test] - - test32: !Command - description: Test the 32bit library - container: ubuntu32 - environ: { RUST_BACKTRACE: 1 } - run: [cargo, test] - - test: !Command - description: Test the 64bit library - container: ubuntu - environ: { RUST_BACKTRACE: 1 } - prerequisites: [test64, test32] - run: [echo, okay] - - bench: !Command - description: Run benchmarks - container: bench - environ: { RUST_BACKTRACE: 1 } - run: [cargo, bench] - - _bulk: !Command - description: Run `bulk` command (for version bookkeeping) - container: ubuntu - run: [bulk] - -containers: - - ubuntu: - setup: - - !Ubuntu xenial - - !UbuntuUniverse - - !Install [ca-certificates, build-essential, vim] - - - !TarInstall - url: "https://static.rust-lang.org/dist/rust-1.31.0-x86_64-unknown-linux-gnu.tar.gz" - script: "./install.sh --prefix=/usr \ - --components=rustc,rust-std-x86_64-unknown-linux-gnu,cargo" - - &bulk !Tar - url: "https://github.com/tailhook/bulk/releases/download/v0.4.10/bulk-v0.4.10.tar.gz" - sha256: 481513f8a0306a9857d045497fb5b50b50a51e9ff748909ecf7d2bda1de275ab - path: / - - environ: - HOME: /work/target - USER: pc - - ubuntu32: - setup: - - !UbuntuRelease - codename: xenial - arch: i386 - - !UbuntuUniverse - - !Install [ca-certificates, build-essential, vim] - - - !TarInstall - url: "https://static.rust-lang.org/dist/rust-1.31.0-i686-unknown-linux-gnu.tar.gz" - script: "./install.sh --prefix=/usr \ - --components=rustc,rust-std-i686-unknown-linux-gnu,cargo" - - environ: - HOME: /work/target - USER: pc - - 
bench: - setup: - - !Ubuntu xenial - - !Install [ca-certificates, wget, build-essential] - - !TarInstall - url: https://static.rust-lang.org/dist/rust-nightly-x86_64-unknown-linux-gnu.tar.gz - script: | - ./install.sh --prefix=/usr \ - --components=rustc,rust-std-x86_64-unknown-linux-gnu,cargo - environ: - HOME: /work/target - USER: pc diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/build.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/build.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/build.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,5 @@ +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + println!(concat!("cargo:VERSION=", env!("CARGO_PKG_VERSION"))); +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/.cargo-checksum.json 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"87f8623da0cebae59b77426a6a0dc4dc5626d137a8c1c92d27ae2d9ef04984dc","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"dde54d4d5c759fbaea82b4039ab9cea7ee91918431dae011ffe01472e2e9eb44","build.rs":"fdf8aa9b5441b298c72ae23645e227adc52ac69d2decc1bda04e1a91f70ff87d","examples/input.rs":"53350088f12a346a99034af41ef432dedcc9e5d581c5592d9aae3807c42656c1","examples/output.prettyplease.rs":"fa63c118daadb64c456ec5b8d5e46e5d7fabbbeb6a6e61a08eabc23360a18fbd","examples/output.rustc.rs":"0c66f8929fa40a2822d4ea1aec3d8b83db598aba043b17f3c1a6133f0d657135","examples/output.rustfmt.rs":"914a9aea1c51e097bfd80c9af4011811e6126c9df5fb0eac3d40b1203fba7c58","src/algorithm.rs":"26ef3e56629cc94278d3e2b8a0fa303acacb1322918a81e6bb3c8d81d13f389d","src/attr.rs":"54e829ae468f22c8e2853d669515575f1444bfee026cfd9b19538f77caf10ab7","src/convenience.rs":"dd392b009b691d3587c7d8e3caeaacf450303c4223792b5f89c336358e371c39","src/data.rs":"9db6623d3ccc79b541a28bdc88875ad0036576689e085007eb362819f8e9a2d3","src/expr.rs":"56976cb2c1434c494ab9805ae8add863d4f138a4492cc59dd285eeb85ec3ac28","src/file.rs":"5689efa3c5959a6a0d8cfc2c13bf8a37ab0669e2b81dbded3f3c28884a88fca0","src/generics.rs":"0099fbd3f1520237fea960a63cef6d846019f3f9ce3c3308f38cef7c817de5c2","src/item.rs":"c12a89007dce97761b66486db4769a395e849b26b6b90852881d7d08eec31636","src/iter.rs":"38b2cd3b38719c6024fb6b3aa739f6f8736c83193fd21e2365d4f6c27bc41666","src/lib.rs":"052da893c79a9797f5ae5dc6ba9a6fcbb0abb1a7970e25d891a0fdecdd3ace49","src/lifetime.rs":"6d420430168185b2da3409bc38a45f63cced9443915f04e6aec71367fc070dcf","src/lit.rs":"43ad4c7e1ff52b6649e79136186caefc1f3b064c04a01d12a5db511fa6f142d4","src/mac.rs":"343b7dd0c5001f3b38f7d15acacbd25924d0d4c2745606b0707ceff971984b98","src/pat.rs":"37d626c401b171f6accf245b8fee6110e948968f59dcafe0889a9375667315f2","src/path.rs":"f2689a5d401a7c9943f626cbd13cf65b97b988b8d4e0
15f3dbe8c7d9c7a415de","src/ring.rs":"e23d133209b977e457b07b0cd93b3711d01f4172d7cfa4cf6a7247637390e606","src/stmt.rs":"3aedfac053b5fdc395f44198d96ec6d2f2696e1ba27d32dde41bb9e34d7256c1","src/token.rs":"c288b1d81f2a35673d4ca1dd10d3386670b067460121df3038303e1ed73b41a7","src/ty.rs":"22b5866a1ee3cd667e28022b96f68ae963d7b615d082553fdf7b5f08075f2ed9"},"package":"1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/Cargo.toml 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,50 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.56" +name = "prettyplease" +version = "0.2.4" +authors = ["David Tolnay "] +links = "prettyplease02" +exclude = ["cargo-expand"] +autoexamples = false +description = "A minimal `syn` syntax tree pretty-printer" +documentation = "https://docs.rs/prettyplease" +readme = "README.md" +keywords = ["rustfmt"] +categories = ["development-tools"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/prettyplease" + +[package.metadata.playground] +features = ["verbatim"] + +[lib] +doc-scrape-examples = false + +[dependencies.proc-macro2] +version = "1.0" +default-features = false + +[dependencies.syn] +version = "2.0.10" +features = ["full"] +default-features = false + +[dev-dependencies.syn] +version = "2.0.10" +features = ["parsing"] +default-features = false + +[features] +verbatim = ["syn/parsing"] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/input.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/input.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/input.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/input.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1 @@ +use crate :: cmp :: Ordering ; use crate :: fmt :: { self , Write as FmtWrite } ; use crate :: hash ; use crate :: io :: Write as IoWrite ; use crate :: mem :: transmute ; use crate :: sys :: net :: netc as c ; use crate :: sys_common :: { AsInner , FromInner , IntoInner } ; # [derive (Copy , Clone , Eq , PartialEq , Hash , PartialOrd , Ord)] pub enum IpAddr { V4 (Ipv4Addr) , V6 (Ipv6Addr) , } # [derive (Copy)] pub struct Ipv4Addr { inner : c :: in_addr , } # [derive (Copy)] pub struct Ipv6Addr { inner : c :: in6_addr , } # [derive (Copy , PartialEq , Eq , Clone , Hash , Debug)] # [non_exhaustive] pub enum Ipv6MulticastScope { InterfaceLocal , LinkLocal , RealmLocal , AdminLocal , SiteLocal , 
OrganizationLocal , Global , } impl IpAddr { pub const fn is_unspecified (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_unspecified () , IpAddr :: V6 (ip) => ip . is_unspecified () , } } pub const fn is_loopback (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_loopback () , IpAddr :: V6 (ip) => ip . is_loopback () , } } pub const fn is_global (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_global () , IpAddr :: V6 (ip) => ip . is_global () , } } pub const fn is_multicast (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_multicast () , IpAddr :: V6 (ip) => ip . is_multicast () , } } pub const fn is_documentation (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_documentation () , IpAddr :: V6 (ip) => ip . is_documentation () , } } pub const fn is_benchmarking (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_benchmarking () , IpAddr :: V6 (ip) => ip . is_benchmarking () , } } pub const fn is_ipv4 (& self) -> bool { matches ! (self , IpAddr :: V4 (_)) } pub const fn is_ipv6 (& self) -> bool { matches ! (self , IpAddr :: V6 (_)) } pub const fn to_canonical (& self) -> IpAddr { match self { & v4 @ IpAddr :: V4 (_) => v4 , IpAddr :: V6 (v6) => v6 . to_canonical () , } } } impl Ipv4Addr { pub const fn new (a : u8 , b : u8 , c : u8 , d : u8) -> Ipv4Addr { Ipv4Addr { inner : c :: in_addr { s_addr : u32 :: from_ne_bytes ([a , b , c , d]) } } } pub const LOCALHOST : Self = Ipv4Addr :: new (127 , 0 , 0 , 1) ; # [doc (alias = "INADDR_ANY")] pub const UNSPECIFIED : Self = Ipv4Addr :: new (0 , 0 , 0 , 0) ; pub const BROADCAST : Self = Ipv4Addr :: new (255 , 255 , 255 , 255) ; pub const fn octets (& self) -> [u8 ; 4] { self . inner . s_addr . to_ne_bytes () } pub const fn is_unspecified (& self) -> bool { self . inner . s_addr == 0 } pub const fn is_loopback (& self) -> bool { self . octets () [0] == 127 } pub const fn is_private (& self) -> bool { match self . octets () { [10 , ..] => true , [172 , b , ..] 
if b >= 16 && b <= 31 => true , [192 , 168 , ..] => true , _ => false , } } pub const fn is_link_local (& self) -> bool { matches ! (self . octets () , [169 , 254 , ..]) } pub const fn is_global (& self) -> bool { if u32 :: from_be_bytes (self . octets ()) == 0xc0000009 || u32 :: from_be_bytes (self . octets ()) == 0xc000000a { return true ; } ! self . is_private () && ! self . is_loopback () && ! self . is_link_local () && ! self . is_broadcast () && ! self . is_documentation () && ! self . is_shared () && ! (self . octets () [0] == 192 && self . octets () [1] == 0 && self . octets () [2] == 0) && ! self . is_reserved () && ! self . is_benchmarking () && self . octets () [0] != 0 } pub const fn is_shared (& self) -> bool { self . octets () [0] == 100 && (self . octets () [1] & 0b1100_0000 == 0b0100_0000) } pub const fn is_benchmarking (& self) -> bool { self . octets () [0] == 198 && (self . octets () [1] & 0xfe) == 18 } pub const fn is_reserved (& self) -> bool { self . octets () [0] & 240 == 240 && ! self . is_broadcast () } pub const fn is_multicast (& self) -> bool { self . octets () [0] >= 224 && self . octets () [0] <= 239 } pub const fn is_broadcast (& self) -> bool { u32 :: from_be_bytes (self . octets ()) == u32 :: from_be_bytes (Self :: BROADCAST . octets ()) } pub const fn is_documentation (& self) -> bool { matches ! (self . octets () , [192 , 0 , 2 , _] | [198 , 51 , 100 , _] | [203 , 0 , 113 , _]) } pub const fn to_ipv6_compatible (& self) -> Ipv6Addr { let [a , b , c , d] = self . octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , a , b , c , d] } , } } pub const fn to_ipv6_mapped (& self) -> Ipv6Addr { let [a , b , c , d] = self . 
octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xFF , 0xFF , a , b , c , d] } , } } } impl fmt :: Display for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { match self { IpAddr :: V4 (ip) => ip . fmt (fmt) , IpAddr :: V6 (ip) => ip . fmt (fmt) , } } } impl fmt :: Debug for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl From < Ipv4Addr > for IpAddr { fn from (ipv4 : Ipv4Addr) -> IpAddr { IpAddr :: V4 (ipv4) } } impl From < Ipv6Addr > for IpAddr { fn from (ipv6 : Ipv6Addr) -> IpAddr { IpAddr :: V6 (ipv6) } } impl fmt :: Display for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { let octets = self . octets () ; if fmt . precision () . is_none () && fmt . width () . is_none () { write ! (fmt , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) } else { const IPV4_BUF_LEN : usize = 15 ; let mut buf = [0u8 ; IPV4_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) . unwrap () ; let len = IPV4_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; fmt . pad (buf) } } } impl fmt :: Debug for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv4Addr { fn clone (& self) -> Ipv4Addr { * self } } impl PartialEq for Ipv4Addr { fn eq (& self , other : & Ipv4Addr) -> bool { self . inner . s_addr == other . inner . 
s_addr } } impl PartialEq < Ipv4Addr > for IpAddr { fn eq (& self , other : & Ipv4Addr) -> bool { match self { IpAddr :: V4 (v4) => v4 == other , IpAddr :: V6 (_) => false , } } } impl PartialEq < IpAddr > for Ipv4Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (v4) => self == v4 , IpAddr :: V6 (_) => false , } } } impl Eq for Ipv4Addr { } impl hash :: Hash for Ipv4Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { { self . inner . s_addr } . hash (s) } } impl PartialOrd for Ipv4Addr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv4Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { match self { IpAddr :: V4 (v4) => v4 . partial_cmp (other) , IpAddr :: V6 (_) => Some (Ordering :: Greater) , } } } impl PartialOrd < IpAddr > for Ipv4Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (v4) => self . partial_cmp (v4) , IpAddr :: V6 (_) => Some (Ordering :: Less) , } } } impl Ord for Ipv4Addr { fn cmp (& self , other : & Ipv4Addr) -> Ordering { u32 :: from_be (self . inner . s_addr) . cmp (& u32 :: from_be (other . inner . s_addr)) } } impl IntoInner < c :: in_addr > for Ipv4Addr { fn into_inner (self) -> c :: in_addr { self . inner } } impl From < Ipv4Addr > for u32 { fn from (ip : Ipv4Addr) -> u32 { let ip = ip . octets () ; u32 :: from_be_bytes (ip) } } impl From < u32 > for Ipv4Addr { fn from (ip : u32) -> Ipv4Addr { Ipv4Addr :: from (ip . 
to_be_bytes ()) } } impl From < [u8 ; 4] > for Ipv4Addr { fn from (octets : [u8 ; 4]) -> Ipv4Addr { Ipv4Addr :: new (octets [0] , octets [1] , octets [2] , octets [3]) } } impl From < [u8 ; 4] > for IpAddr { fn from (octets : [u8 ; 4]) -> IpAddr { IpAddr :: V4 (Ipv4Addr :: from (octets)) } } impl Ipv6Addr { pub const fn new (a : u16 , b : u16 , c : u16 , d : u16 , e : u16 , f : u16 , g : u16 , h : u16) -> Ipv6Addr { let addr16 = [a . to_be () , b . to_be () , c . to_be () , d . to_be () , e . to_be () , f . to_be () , g . to_be () , h . to_be () ,] ; Ipv6Addr { inner : c :: in6_addr { s6_addr : unsafe { transmute :: < _ , [u8 ; 16] > (addr16) } , } , } } pub const LOCALHOST : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 1) ; pub const UNSPECIFIED : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 0) ; pub const fn segments (& self) -> [u16 ; 8] { let [a , b , c , d , e , f , g , h] = unsafe { transmute :: < _ , [u16 ; 8] > (self . inner . s6_addr) } ; [u16 :: from_be (a) , u16 :: from_be (b) , u16 :: from_be (c) , u16 :: from_be (d) , u16 :: from_be (e) , u16 :: from_be (f) , u16 :: from_be (g) , u16 :: from_be (h) ,] } pub const fn is_unspecified (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: UNSPECIFIED . octets ()) } pub const fn is_loopback (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: LOCALHOST . octets ()) } pub const fn is_global (& self) -> bool { match self . multicast_scope () { Some (Ipv6MulticastScope :: Global) => true , None => self . is_unicast_global () , _ => false , } } pub const fn is_unique_local (& self) -> bool { (self . segments () [0] & 0xfe00) == 0xfc00 } pub const fn is_unicast (& self) -> bool { ! self . is_multicast () } pub const fn is_unicast_link_local (& self) -> bool { (self . segments () [0] & 0xffc0) == 0xfe80 } pub const fn is_documentation (& self) -> bool { (self . segments () [0] == 0x2001) && (self . 
segments () [1] == 0xdb8) } pub const fn is_benchmarking (& self) -> bool { (self . segments () [0] == 0x2001) && (self . segments () [1] == 0x2) && (self . segments () [2] == 0) } pub const fn is_unicast_global (& self) -> bool { self . is_unicast () && ! self . is_loopback () && ! self . is_unicast_link_local () && ! self . is_unique_local () && ! self . is_unspecified () && ! self . is_documentation () } pub const fn multicast_scope (& self) -> Option < Ipv6MulticastScope > { if self . is_multicast () { match self . segments () [0] & 0x000f { 1 => Some (Ipv6MulticastScope :: InterfaceLocal) , 2 => Some (Ipv6MulticastScope :: LinkLocal) , 3 => Some (Ipv6MulticastScope :: RealmLocal) , 4 => Some (Ipv6MulticastScope :: AdminLocal) , 5 => Some (Ipv6MulticastScope :: SiteLocal) , 8 => Some (Ipv6MulticastScope :: OrganizationLocal) , 14 => Some (Ipv6MulticastScope :: Global) , _ => None , } } else { None } } pub const fn is_multicast (& self) -> bool { (self . segments () [0] & 0xff00) == 0xff00 } pub const fn to_ipv4_mapped (& self) -> Option < Ipv4Addr > { match self . octets () { [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xff , 0xff , a , b , c , d] => { Some (Ipv4Addr :: new (a , b , c , d)) } _ => None , } } pub const fn to_ipv4 (& self) -> Option < Ipv4Addr > { if let [0 , 0 , 0 , 0 , 0 , 0 | 0xffff , ab , cd] = self . segments () { let [a , b] = ab . to_be_bytes () ; let [c , d] = cd . to_be_bytes () ; Some (Ipv4Addr :: new (a , b , c , d)) } else { None } } pub const fn to_canonical (& self) -> IpAddr { if let Some (mapped) = self . to_ipv4_mapped () { return IpAddr :: V4 (mapped) ; } IpAddr :: V6 (* self) } pub const fn octets (& self) -> [u8 ; 16] { self . inner . s6_addr } } impl fmt :: Display for Ipv6Addr { fn fmt (& self , f : & mut fmt :: Formatter < '_ >) -> fmt :: Result { if f . precision () . is_none () && f . width () . is_none () { let segments = self . segments () ; if self . is_unspecified () { f . write_str ("::") } else if self . 
is_loopback () { f . write_str ("::1") } else if let Some (ipv4) = self . to_ipv4 () { match segments [5] { 0 => write ! (f , "::{}" , ipv4) , 0xffff => write ! (f , "::ffff:{}" , ipv4) , _ => unreachable ! () , } } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } } else { const IPV6_BUF_LEN : usize = (4 * 8) + 7 ; let mut buf = [0u8 ; IPV6_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}" , self) . unwrap () ; let len = IPV6_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; f . pad (buf) } } } impl fmt :: Debug for Ipv6Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv6Addr { fn clone (& self) -> Ipv6Addr { * self } } impl PartialEq for Ipv6Addr { fn eq (& self , other : & Ipv6Addr) -> bool { self . inner . s6_addr == other . inner . 
s6_addr } } impl PartialEq < IpAddr > for Ipv6Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => self == v6 , } } } impl PartialEq < Ipv6Addr > for IpAddr { fn eq (& self , other : & Ipv6Addr) -> bool { match self { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => v6 == other , } } } impl Eq for Ipv6Addr { } impl hash :: Hash for Ipv6Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { self . inner . s6_addr . hash (s) } } impl PartialOrd for Ipv6Addr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv6Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { match self { IpAddr :: V4 (_) => Some (Ordering :: Less) , IpAddr :: V6 (v6) => v6 . partial_cmp (other) , } } } impl PartialOrd < IpAddr > for Ipv6Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (_) => Some (Ordering :: Greater) , IpAddr :: V6 (v6) => self . partial_cmp (v6) , } } } impl Ord for Ipv6Addr { fn cmp (& self , other : & Ipv6Addr) -> Ordering { self . segments () . cmp (& other . segments ()) } } impl AsInner < c :: in6_addr > for Ipv6Addr { fn as_inner (& self) -> & c :: in6_addr { & self . inner } } impl FromInner < c :: in6_addr > for Ipv6Addr { fn from_inner (addr : c :: in6_addr) -> Ipv6Addr { Ipv6Addr { inner : addr } } } impl From < Ipv6Addr > for u128 { fn from (ip : Ipv6Addr) -> u128 { let ip = ip . octets () ; u128 :: from_be_bytes (ip) } } impl From < u128 > for Ipv6Addr { fn from (ip : u128) -> Ipv6Addr { Ipv6Addr :: from (ip . 
to_be_bytes ()) } } impl From < [u8 ; 16] > for Ipv6Addr { fn from (octets : [u8 ; 16]) -> Ipv6Addr { let inner = c :: in6_addr { s6_addr : octets } ; Ipv6Addr :: from_inner (inner) } } impl From < [u16 ; 8] > for Ipv6Addr { fn from (segments : [u16 ; 8]) -> Ipv6Addr { let [a , b , c , d , e , f , g , h] = segments ; Ipv6Addr :: new (a , b , c , d , e , f , g , h) } } impl From < [u8 ; 16] > for IpAddr { fn from (octets : [u8 ; 16]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (octets)) } } impl From < [u16 ; 8] > for IpAddr { fn from (segments : [u16 ; 8]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (segments)) } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.prettyplease.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.prettyplease.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.prettyplease.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.prettyplease.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,593 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + V4(Ipv4Addr), + V6(Ipv6Addr), +} +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool 
{ + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { + s_addr: u32::from_ne_bytes([a, b, c, d]), + }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { + self.inner.s_addr.to_ne_bytes() + } + pub const fn is_unspecified(&self) -> bool { + self.inner.s_addr == 0 + } + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] 
=> true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 + || u32::from_be_bytes(self.octets()) == 0xc000000a + { + return true; + } + !self.is_private() && !self.is_loopback() && !self.is_link_local() + && !self.is_broadcast() && !self.is_documentation() && !self.is_shared() + && !(self.octets()[0] == 192 && self.octets()[1] == 0 + && self.octets()[2] == 0) && !self.is_reserved() + && !self.is_benchmarking() && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 
+ fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + .unwrap(); + let len = IPV4_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { + *self + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match 
other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { + self.inner + } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} +impl Ipv6Addr { + pub const fn new( + a: u16, + b: u16, + c: u16, + d: u16, + e: u16, + f: u16, + g: u16, + h: u16, + ) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = unsafe { + transmute::<_, [u16; 8]>(self.inner.s6_addr) + }; + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), + u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) + == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) + == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + 
Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ => false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { + !self.is_multicast() + } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) + && (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() && !self.is_loopback() && !self.is_unicast_link_local() + && !self.is_unique_local() && !self.is_unspecified() + && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { + None + } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + pub 
const fn octets(&self) -> [u8; 16] { + self.inner.s6_addr + } +} +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + let zeroes = { + let mut longest = Span::default(); + let mut current = Span::default(); + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { + current.start = i; + } + current.len += 1; + if current.len > longest.len { + longest = current; + } + } else { + current = Span::default(); + } + } + longest + }; + /// Write a colon-separated part of the address + #[inline] + fn fmt_subslice( + f: &mut fmt::Formatter<'_>, + chunk: &[u16], + ) -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { + fmt_subslice(f, &segments) + } + } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { + 
*self + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { + self.inner.s6_addr.hash(s) + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { + &self.inner + } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { + Ipv6Addr { inner: addr } + } +} +impl From for u128 { + fn from(ip: Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn 
from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustc.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustc.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustc.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustc.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,508 @@ +use crate::cmp::Ordering;use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { V4(Ipv4Addr), V6(Ipv6Addr), } +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + 
IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { matches!(self, IpAddr :: V4(_)) } + pub const fn is_ipv6(&self) -> bool { matches!(self, IpAddr :: V6(_)) } + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { s_addr: u32::from_ne_bytes([a, b, c, d]) }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { self.inner.s_addr.to_ne_bytes() } + pub const fn is_unspecified(&self) -> bool { self.inner.s_addr == 0 } + pub const fn is_loopback(&self) -> bool { self.octets()[0] == 127 } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] 
=> true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 || + u32::from_be_bytes(self.octets()) == 0xc000000a { + return true; + } + !self.is_private() && !self.is_loopback() && !self.is_link_local() && + !self.is_broadcast() && !self.is_documentation() && + !self.is_shared() && + !(self.octets()[0] == 192 && self.octets()[1] == 0 && + self.octets()[2] == 0) && !self.is_reserved() && + !self.is_benchmarking() && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && + (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == + u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | + [203, 0, 113, _]) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, + d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> 
fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { IpAddr::V4(ipv4) } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { IpAddr::V6(ipv6) } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets [0], octets [1], octets [2], + octets [3]) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}.{}.{}.{}", octets [0], octets [1], octets + [2], octets [3]).unwrap(); + let len = IPV4_BUF_LEN - buf_slice.len(); + let buf = + unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { *self } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { IpAddr::V4(v4) => v4 == other, IpAddr::V6(_) => false, } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match 
other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { self.inner } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { Ipv4Addr::from(ip.to_be_bytes()) } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { IpAddr::V4(Ipv4Addr::from(octets)) } +} +impl Ipv6Addr { + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, + h: u16) -> Ipv6Addr { + let addr16 = + [a.to_be(), b.to_be(), c.to_be(), d.to_be(), e.to_be(), f.to_be(), + g.to_be(), h.to_be()]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = + unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; + [u16::from_be(a), u16::from_be(b), u16::from_be(c), u16::from_be(d), + u16::from_be(e), u16::from_be(f), u16::from_be(g), + u16::from_be(h)] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) == + u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) == + u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ 
=> false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { !self.is_multicast() } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && + (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() && !self.is_loopback() && + !self.is_unicast_link_local() && !self.is_unique_local() && + !self.is_unspecified() && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { None } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { None } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + pub const fn octets(&self) -> [u8; 16] { self.inner.s6_addr } +} +impl fmt::Display for Ipv6Addr 
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + let zeroes = + { + let mut longest = Span::default(); + let mut current = Span::default(); + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { current.start = i; } + current.len += 1; + if current.len > longest.len { longest = current; } + } else { current = Span::default(); } + } + longest + }; + #[doc = " Write a colon-separated part of the address"] + #[inline] + fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) + -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { fmt_subslice(f, &segments) } + } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = + unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { *self } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == 
other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(_) => false, IpAddr::V6(v6) => self == v6, } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { IpAddr::V4(_) => false, IpAddr::V6(v6) => v6 == other, } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { self.inner.s6_addr.hash(s) } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { &self.inner } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { Ipv6Addr { inner: addr } } +} +impl From for u128 { + fn from(ip: Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { Ipv6Addr::from(ip.to_be_bytes()) } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn from(octets: [u8; 16]) -> IpAddr { IpAddr::V6(Ipv6Addr::from(octets)) } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> 
IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustfmt.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustfmt.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustfmt.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/examples/output.rustfmt.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,552 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + V4(Ipv4Addr), + V6(Ipv6Addr), +} +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> 
bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { + s_addr: u32::from_ne_bytes([a, b, c, d]), + }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { + self.inner.s_addr.to_ne_bytes() + } + pub const fn is_unspecified(&self) -> bool { + self.inner.s_addr == 0 + } + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] 
=> true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 + || u32::from_be_bytes(self.octets()) == 0xc000000a + { + return true; + } + !self.is_private() + && !self.is_loopback() + && !self.is_link_local() + && !self.is_broadcast() + && !self.is_documentation() + && !self.is_shared() + && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) + && !self.is_reserved() + && !self.is_benchmarking() + && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!( + self.octets(), + [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _] + ) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) 
-> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!( + fmt, + "{}.{}.{}.{}", + octets[0], octets[1], octets[2], octets[3] + ) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!( + buf_slice, + "{}.{}.{}.{}", + octets[0], octets[1], octets[2], octets[3] + ) + .unwrap(); + let len = IPV4_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { + *self + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, 
other: &IpAddr) -> Option { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { + self.inner + } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} +impl Ipv6Addr { + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), + u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + 
Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ => false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { + !self.is_multicast() + } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() + && !self.is_loopback() + && !self.is_unicast_link_local() + && !self.is_unique_local() + && !self.is_unspecified() + && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { + None + } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + 
pub const fn octets(&self) -> [u8; 16] { + self.inner.s6_addr + } +} +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . 
len ..]) } else { fmt_subslice (f , & segments) } } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { + *self + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { + self.inner.s6_addr.hash(s) + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { + &self.inner + } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { + Ipv6Addr { inner: addr } + } +} +impl From for u128 { + fn from(ip: 
Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-APACHE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-APACHE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-APACHE 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-MIT clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-MIT --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/LICENSE-MIT 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/README.md 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/README.md 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,312 @@ +prettyplease::unparse +===================== + +[github](https://github.com/dtolnay/prettyplease) +[crates.io](https://crates.io/crates/prettyplease) +[docs.rs](https://docs.rs/prettyplease) +[build status](https://github.com/dtolnay/prettyplease/actions?query=branch%3Amaster) + +A minimal `syn` syntax tree pretty-printer. + +
+ +## Overview + +This is a pretty-printer to turn a `syn` syntax tree into a `String` of +well-formatted source code. In contrast to rustfmt, this library is intended to +be suitable for arbitrary generated code. + +Rustfmt prioritizes high-quality output that is impeccable enough that you'd be +comfortable spending your career staring at its output — but that means +some heavyweight algorithms, and it has a tendency to bail out on code that is +hard to format (for example [rustfmt#3697], and there are dozens more issues +like it). That's not necessarily a big deal for human-generated code because +when code gets highly nested, the human will naturally be inclined to refactor +into more easily formattable code. But for generated code, having the formatter +just give up leaves it totally unreadable. + +[rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697 + +This library is designed using the simplest possible algorithm and data +structures that can deliver about 95% of the quality of rustfmt-formatted +output. In my experience testing real-world code, approximately 97-98% of output +lines come out identical between rustfmt's formatting and this crate's. The rest +have slightly different linebreak decisions, but still clearly follow the +dominant modern Rust style. + +The tradeoffs made by this crate are a good fit for generated code that you will +*not* spend your career staring at. For example, the output of `bindgen`, or the +output of `cargo-expand`. In those cases it's more important that the whole +thing be formattable without the formatter giving up, than that it be flawless. + +
+ +## Feature matrix + +Here are a few superficial comparisons of this crate against the AST +pretty-printer built into rustc, and rustfmt. The sections below go into more +detail comparing the output of each of these libraries. + +| | prettyplease | rustc | rustfmt | +|:---|:---:|:---:|:---:| +| non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | +| idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | +| throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | +| number of dependencies | 3 | 72 | 66 | +| compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | +| buildable using a stable Rust compiler | 💚 | ❌ | ❌ | +| published to crates.io | 💚 | ❌ | ❌ | +| extensively configurable output | ❌ | ❌ | 💚 | +| intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | + +
+ +## Comparison to rustfmt + +- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +- [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) + +If you weren't told which output file is which, it would be practically +impossible to tell — **except** for line 435 in the rustfmt output, which +is more than 1000 characters long because rustfmt just gave up formatting that +part of the file: + +```rust + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; +``` + +This is a pretty typical manifestation of rustfmt bailing out in generated code +— a chunk of the input ends up on one line. 
The other manifestation is +that you're working on some code, running rustfmt on save like a conscientious +developer, but after a while notice it isn't doing anything. You introduce an +intentional formatting issue, like a stray indent or semicolon, and run rustfmt +to check your suspicion. Nope, it doesn't get cleaned up — rustfmt is just +not formatting the part of the file you are working on. + +The prettyplease library is designed to have no pathological cases that force a +bail out; the entire input you give it will get formatted in some "good enough" +form. + +Separately, rustfmt can be problematic to integrate into projects. It's written +using rustc's internal syntax tree, so it can't be built by a stable compiler. +Its releases are not regularly published to crates.io, so in Cargo builds you'd +need to depend on it as a git dependency, which precludes publishing your crate +to crates.io also. You can shell out to a `rustfmt` binary, but that'll be +whatever rustfmt version is installed on each developer's system (if any), which +can lead to spurious diffs in checked-in generated code formatted by different +versions. In contrast prettyplease is designed to be easy to pull in as a +library, and compiles fast. + +
+ +## Comparison to rustc_ast_pretty + +- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +- [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) + +This is the pretty-printer that gets used when rustc prints source code, such as +`rustc -Zunpretty=expanded`. It's used also by the standard library's +`stringify!` when stringifying an interpolated macro_rules AST fragment, like an +$:expr, and transitively by `dbg!` and many macros in the ecosystem. + +Rustc's formatting is mostly okay, but does not hew closely to the dominant +contemporary style of Rust formatting. Some things wouldn't ever be written on +one line, like this `match` expression, and certainly not with a comma in front +of the closing brace: + +```rust +fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } +} +``` + +Some places use non-multiple-of-4 indentation, which is definitely not the norm: + +```rust +pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr{inner: + c::in6_addr{s6_addr: + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, + 0xFF, a, b, c, d],},} +} +``` + +And although there isn't an egregious example of it in the link because the +input code is pretty tame, in general rustc_ast_pretty has pathological behavior +on generated code. It has a tendency to use excessive horizontal indentation and +rapidly run out of width: + +```rust +::std::io::_print(::core::fmt::Arguments::new_v1(&[""], + &match (&msg,) { + _args => + [::core::fmt::ArgumentV1::new(_args.0, + ::core::fmt::Display::fmt)], + })); +``` + +The snippets above are clearly different from modern rustfmt style. In contrast, +prettyplease is designed to have output that is practically indistinguishable +from rustfmt-formatted code. + +
+ +## Example + +```rust +// [dependencies] +// prettyplease = "0.2" +// syn = { version = "2", default-features = false, features = ["full", "parsing"] } + +const INPUT: &str = stringify! { + use crate::{ + lazy::{Lazy, SyncLazy, SyncOnceCell}, panic, + sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, + mpsc::channel, Mutex, }, + thread, + }; + impl Into for T where U: From { + fn into(self) -> U { U::from(self) } + } +}; + +fn main() { + let syntax_tree = syn::parse_file(INPUT).unwrap(); + let formatted = prettyplease::unparse(&syntax_tree); + print!("{}", formatted); +} +``` + +
+ +## Algorithm notes + +The approach and terminology used in the implementation are derived from [*Derek +C. Oppen, "Pretty Printing" (1979)*][paper], on which rustc_ast_pretty is also +based, and from rustc_ast_pretty's implementation written by Graydon Hoare in +2011 (and modernized over the years by dozens of volunteer maintainers). + +[paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf + +The paper describes two language-agnostic interacting procedures `Scan()` and +`Print()`. Language-specific code decomposes an input data structure into a +stream of `string` and `break` tokens, and `begin` and `end` tokens for +grouping. Each `begin`–`end` range may be identified as either "consistent +breaking" or "inconsistent breaking". If a group is consistently breaking, then +if the whole contents do not fit on the line, *every* `break` token in the group +will receive a linebreak. This is appropriate, for example, for Rust struct +literals, or arguments of a function call. If a group is inconsistently +breaking, then the `string` tokens in the group are greedily placed on the line +until out of space, and linebroken only at those `break` tokens for which the +next string would not fit. For example, this is appropriate for the contents of +a braced `use` statement in Rust. + +Scan's job is to efficiently accumulate sizing information about groups and +breaks. For every `begin` token we compute the distance to the matched `end` +token, and for every `break` we compute the distance to the next `break`. The +algorithm uses a ringbuffer to hold tokens whose size is not yet ascertained. +The maximum size of the ringbuffer is bounded by the target line length and does +not grow indefinitely, regardless of deep nesting in the input stream. That's +because once a group is sufficiently big, the precise size can no longer make a +difference to linebreak decisions and we can effectively treat it as "infinity". 
+ +Print's job is to use the sizing information to efficiently assign a "broken" or +"not broken" status to every `begin` token. At that point the output is easily +constructed by concatenating `string` tokens and breaking at `break` tokens +contained within a broken group. + +Leveraging these primitives (i.e. cleverly placing the all-or-nothing consistent +breaks and greedy inconsistent breaks) to yield rustfmt-compatible formatting +for all of Rust's syntax tree nodes is a fun challenge. + +Here is a visualization of some Rust tokens fed into the pretty printing +algorithm. Consistently breaking `begin`—`end` pairs are represented by +`«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, and the +rest of the non-whitespace are `string`. + +```text +use crate::«{· +‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· + panic,· + sync::«{· +‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· + mpsc::channel,· Mutex›,· + }»,· + thread›,· +}»;· +«‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· +where· + U:‹ From<«·T·»>›,· +{· +« fn into(·«·self·») -> U {· +‹ U::from(«·self·»)›· +» }· +»}· +``` + +The algorithm described in the paper is not quite sufficient for producing +well-formatted Rust code that is locally indistinguishable from rustfmt's style. +The reason is that in the paper, the complete non-whitespace contents are +assumed to be independent of linebreak decisions, with Scan and Print being only +in control of the whitespace (spaces and line breaks). In Rust as idiomatically +formatted by rustfmt, that is not the case. 
Trailing commas are one example; +the punctuation is only known *after* the broken vs non-broken status of the +surrounding group is known: + +```rust +let _ = Struct { x: 0, y: true }; + +let _ = Struct { + x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, + y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped +}; +``` + +The formatting of `match` expressions is another case; we want small arms on the +same line as the pattern, and big arms wrapped in a brace. The presence of the +brace punctuation, comma, and semicolon are all dependent on whether the arm +fits on the line: + +```rust +match total_nanos.checked_add(entry.nanos as u64) { + Some(n) => tmp = n, //<- small arm, inline with comma + None => { + total_secs = total_secs + .checked_add(total_nanos / NANOS_PER_SEC as u64) + .expect("overflow in iter::sum over durations"); + } //<- big arm, needs brace added, and also semicolon^ +} +``` + +The printing algorithm implementation in this crate accommodates all of these +situations with conditional punctuation tokens whose selection can be deferred +and populated after it's known that the group is or is not broken. + +
+ +#### License + + +Licensed under either of
Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/algorithm.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/algorithm.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/algorithm.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/algorithm.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,376 @@ +// Adapted from https://github.com/rust-lang/rust/blob/1.57.0/compiler/rustc_ast_pretty/src/pp.rs. +// See "Algorithm notes" in the crate-level rustdoc. + +use crate::ring::RingBuffer; +use crate::{MARGIN, MIN_SPACE}; +use std::borrow::Cow; +use std::cmp; +use std::collections::VecDeque; +use std::iter; + +#[derive(Clone, Copy, PartialEq)] +pub enum Breaks { + Consistent, + Inconsistent, +} + +#[derive(Clone, Copy, Default)] +pub struct BreakToken { + pub offset: isize, + pub blank_space: usize, + pub pre_break: Option, + pub post_break: Option, + pub no_break: Option, + pub if_nonempty: bool, + pub never_break: bool, +} + +#[derive(Clone, Copy)] +pub struct BeginToken { + pub offset: isize, + pub breaks: Breaks, +} + +#[derive(Clone)] +pub enum Token { + String(Cow<'static, str>), + Break(BreakToken), + Begin(BeginToken), + End, +} + +#[derive(Copy, Clone)] +enum PrintFrame { + Fits(Breaks), + Broken(usize, Breaks), +} + +pub const SIZE_INFINITY: isize = 0xffff; + +pub struct Printer { + out: String, + // Number of spaces left on line + space: isize, + // Ring-buffer of tokens and calculated sizes + buf: RingBuffer, + // Total size of tokens already printed + left_total: isize, + // Total size of tokens enqueued, including printed and not yet printed + right_total: isize, + // Holds the ring-buffer index of the Begin 
that started the current block, + // possibly with the most recent Break after that Begin (if there is any) on + // top of it. Values are pushed and popped on the back of the queue using it + // like stack, and elsewhere old values are popped from the front of the + // queue as they become irrelevant due to the primary ring-buffer advancing. + scan_stack: VecDeque, + // Stack of blocks-in-progress being flushed by print + print_stack: Vec, + // Level of indentation of current line + indent: usize, + // Buffered indentation to avoid writing trailing whitespace + pending_indentation: usize, +} + +#[derive(Clone)] +struct BufEntry { + token: Token, + size: isize, +} + +impl Printer { + pub fn new() -> Self { + Printer { + out: String::new(), + space: MARGIN, + buf: RingBuffer::new(), + left_total: 0, + right_total: 0, + scan_stack: VecDeque::new(), + print_stack: Vec::new(), + indent: 0, + pending_indentation: 0, + } + } + + pub fn eof(mut self) -> String { + if !self.scan_stack.is_empty() { + self.check_stack(0); + self.advance_left(); + } + self.out + } + + pub fn scan_begin(&mut self, token: BeginToken) { + if self.scan_stack.is_empty() { + self.left_total = 1; + self.right_total = 1; + self.buf.clear(); + } + let right = self.buf.push(BufEntry { + token: Token::Begin(token), + size: -self.right_total, + }); + self.scan_stack.push_back(right); + } + + pub fn scan_end(&mut self) { + if self.scan_stack.is_empty() { + self.print_end(); + } else { + if !self.buf.is_empty() { + if let Token::Break(break_token) = self.buf.last().token { + if self.buf.len() >= 2 { + if let Token::Begin(_) = self.buf.second_last().token { + self.buf.pop_last(); + self.buf.pop_last(); + self.scan_stack.pop_back(); + self.scan_stack.pop_back(); + self.right_total -= break_token.blank_space as isize; + return; + } + } + if break_token.if_nonempty { + self.buf.pop_last(); + self.scan_stack.pop_back(); + self.right_total -= break_token.blank_space as isize; + } + } + } + let right = 
self.buf.push(BufEntry { + token: Token::End, + size: -1, + }); + self.scan_stack.push_back(right); + } + } + + pub fn scan_break(&mut self, token: BreakToken) { + if self.scan_stack.is_empty() { + self.left_total = 1; + self.right_total = 1; + self.buf.clear(); + } else { + self.check_stack(0); + } + let right = self.buf.push(BufEntry { + token: Token::Break(token), + size: -self.right_total, + }); + self.scan_stack.push_back(right); + self.right_total += token.blank_space as isize; + } + + pub fn scan_string(&mut self, string: Cow<'static, str>) { + if self.scan_stack.is_empty() { + self.print_string(string); + } else { + let len = string.len() as isize; + self.buf.push(BufEntry { + token: Token::String(string), + size: len, + }); + self.right_total += len; + self.check_stream(); + } + } + + pub fn offset(&mut self, offset: isize) { + match &mut self.buf.last_mut().token { + Token::Break(token) => token.offset += offset, + Token::Begin(_) => {} + Token::String(_) | Token::End => unreachable!(), + } + } + + pub fn end_with_max_width(&mut self, max: isize) { + let mut depth = 1; + for &index in self.scan_stack.iter().rev() { + let entry = &self.buf[index]; + match entry.token { + Token::Begin(_) => { + depth -= 1; + if depth == 0 { + if entry.size < 0 { + let actual_width = entry.size + self.right_total; + if actual_width > max { + self.buf.push(BufEntry { + token: Token::String(Cow::Borrowed("")), + size: SIZE_INFINITY, + }); + self.right_total += SIZE_INFINITY; + } + } + break; + } + } + Token::End => depth += 1, + Token::Break(_) => {} + Token::String(_) => unreachable!(), + } + } + self.scan_end(); + } + + fn check_stream(&mut self) { + while self.right_total - self.left_total > self.space { + if *self.scan_stack.front().unwrap() == self.buf.index_of_first() { + self.scan_stack.pop_front().unwrap(); + self.buf.first_mut().size = SIZE_INFINITY; + } + + self.advance_left(); + + if self.buf.is_empty() { + break; + } + } + } + + fn advance_left(&mut self) { + while 
self.buf.first().size >= 0 { + let left = self.buf.pop_first(); + + match left.token { + Token::String(string) => { + self.left_total += left.size; + self.print_string(string); + } + Token::Break(token) => { + self.left_total += token.blank_space as isize; + self.print_break(token, left.size); + } + Token::Begin(token) => self.print_begin(token, left.size), + Token::End => self.print_end(), + } + + if self.buf.is_empty() { + break; + } + } + } + + fn check_stack(&mut self, mut depth: usize) { + while let Some(&index) = self.scan_stack.back() { + let mut entry = &mut self.buf[index]; + match entry.token { + Token::Begin(_) => { + if depth == 0 { + break; + } + self.scan_stack.pop_back().unwrap(); + entry.size += self.right_total; + depth -= 1; + } + Token::End => { + self.scan_stack.pop_back().unwrap(); + entry.size = 1; + depth += 1; + } + Token::Break(_) => { + self.scan_stack.pop_back().unwrap(); + entry.size += self.right_total; + if depth == 0 { + break; + } + } + Token::String(_) => unreachable!(), + } + } + } + + fn get_top(&self) -> PrintFrame { + const OUTER: PrintFrame = PrintFrame::Broken(0, Breaks::Inconsistent); + self.print_stack.last().map_or(OUTER, PrintFrame::clone) + } + + fn print_begin(&mut self, token: BeginToken, size: isize) { + if cfg!(prettyplease_debug) { + self.out.push(match token.breaks { + Breaks::Consistent => '«', + Breaks::Inconsistent => '‹', + }); + if cfg!(prettyplease_debug_indent) { + self.out + .extend(token.offset.to_string().chars().map(|ch| match ch { + '0'..='9' => ['₀', '₁', '₂', '₃', '₄', '₅', '₆', '₇', '₈', '₉'] + [(ch as u8 - b'0') as usize], + '-' => '₋', + _ => unreachable!(), + })); + } + } + if size > self.space { + self.print_stack + .push(PrintFrame::Broken(self.indent, token.breaks)); + self.indent = usize::try_from(self.indent as isize + token.offset).unwrap(); + } else { + self.print_stack.push(PrintFrame::Fits(token.breaks)); + } + } + + fn print_end(&mut self) { + let breaks = match 
self.print_stack.pop().unwrap() { + PrintFrame::Broken(indent, breaks) => { + self.indent = indent; + breaks + } + PrintFrame::Fits(breaks) => breaks, + }; + if cfg!(prettyplease_debug) { + self.out.push(match breaks { + Breaks::Consistent => '»', + Breaks::Inconsistent => '›', + }); + } + } + + fn print_break(&mut self, token: BreakToken, size: isize) { + let fits = token.never_break + || match self.get_top() { + PrintFrame::Fits(..) => true, + PrintFrame::Broken(.., Breaks::Consistent) => false, + PrintFrame::Broken(.., Breaks::Inconsistent) => size <= self.space, + }; + if fits { + self.pending_indentation += token.blank_space; + self.space -= token.blank_space as isize; + if let Some(no_break) = token.no_break { + self.out.push(no_break); + self.space -= no_break.len_utf8() as isize; + } + if cfg!(prettyplease_debug) { + self.out.push('·'); + } + } else { + if let Some(pre_break) = token.pre_break { + self.print_indent(); + self.out.push(pre_break); + } + if cfg!(prettyplease_debug) { + self.out.push('·'); + } + self.out.push('\n'); + let indent = self.indent as isize + token.offset; + self.pending_indentation = usize::try_from(indent).unwrap(); + self.space = cmp::max(MARGIN - indent, MIN_SPACE); + if let Some(post_break) = token.post_break { + self.print_indent(); + self.out.push(post_break); + self.space -= post_break.len_utf8() as isize; + } + } + } + + fn print_string(&mut self, string: Cow<'static, str>) { + self.print_indent(); + self.out.push_str(&string); + self.space -= string.len() as isize; + } + + fn print_indent(&mut self) { + self.out.reserve(self.pending_indentation); + self.out + .extend(iter::repeat(' ').take(self.pending_indentation)); + self.pending_indentation = 0; + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/attr.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/attr.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/attr.rs 1970-01-01 00:00:00.000000000 +0000 +++ 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/attr.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,287 @@ +use crate::algorithm::Printer; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; +use syn::{AttrStyle, Attribute, Expr, Lit, MacroDelimiter, Meta, MetaList, MetaNameValue}; + +impl Printer { + pub fn outer_attrs(&mut self, attrs: &[Attribute]) { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + self.attr(attr); + } + } + } + + pub fn inner_attrs(&mut self, attrs: &[Attribute]) { + for attr in attrs { + if let AttrStyle::Inner(_) = attr.style { + self.attr(attr); + } + } + } + + fn attr(&mut self, attr: &Attribute) { + if let Some(mut doc) = value_of_attribute("doc", attr) { + if !doc.contains('\n') + && match attr.style { + AttrStyle::Outer => !doc.starts_with('/'), + AttrStyle::Inner(_) => true, + } + { + trim_trailing_spaces(&mut doc); + self.word(match attr.style { + AttrStyle::Outer => "///", + AttrStyle::Inner(_) => "//!", + }); + self.word(doc); + self.hardbreak(); + return; + } else if can_be_block_comment(&doc) + && match attr.style { + AttrStyle::Outer => !doc.starts_with(&['*', '/'][..]), + AttrStyle::Inner(_) => true, + } + { + trim_interior_trailing_spaces(&mut doc); + self.word(match attr.style { + AttrStyle::Outer => "/**", + AttrStyle::Inner(_) => "/*!", + }); + self.word(doc); + self.word("*/"); + self.hardbreak(); + return; + } + } else if let Some(mut comment) = value_of_attribute("comment", attr) { + if !comment.contains('\n') { + trim_trailing_spaces(&mut comment); + self.word("//"); + self.word(comment); + self.hardbreak(); + return; + } else if can_be_block_comment(&comment) && !comment.starts_with(&['*', '!'][..]) { + trim_interior_trailing_spaces(&mut comment); + self.word("/*"); + self.word(comment); + self.word("*/"); + self.hardbreak(); + return; + } + } + + self.word(match attr.style { + AttrStyle::Outer => "#", + AttrStyle::Inner(_) => 
"#!", + }); + self.word("["); + self.meta(&attr.meta); + self.word("]"); + self.space(); + } + + fn meta(&mut self, meta: &Meta) { + match meta { + Meta::Path(path) => self.path(path, PathKind::Simple), + Meta::List(meta) => self.meta_list(meta), + Meta::NameValue(meta) => self.meta_name_value(meta), + } + } + + fn meta_list(&mut self, meta: &MetaList) { + self.path(&meta.path, PathKind::Simple); + let delimiter = match meta.delimiter { + MacroDelimiter::Paren(_) => Delimiter::Parenthesis, + MacroDelimiter::Brace(_) => Delimiter::Brace, + MacroDelimiter::Bracket(_) => Delimiter::Bracket, + }; + let group = Group::new(delimiter, meta.tokens.clone()); + self.attr_tokens(TokenStream::from(TokenTree::Group(group))); + } + + fn meta_name_value(&mut self, meta: &MetaNameValue) { + self.path(&meta.path, PathKind::Simple); + self.word(" = "); + self.expr(&meta.value); + } + + fn attr_tokens(&mut self, tokens: TokenStream) { + let mut stack = Vec::new(); + stack.push((tokens.into_iter().peekable(), Delimiter::None)); + let mut space = Self::nbsp as fn(&mut Self); + + #[derive(PartialEq)] + enum State { + Word, + Punct, + TrailingComma, + } + + use State::*; + let mut state = Word; + + while let Some((tokens, delimiter)) = stack.last_mut() { + match tokens.next() { + Some(TokenTree::Ident(ident)) => { + if let Word = state { + space(self); + } + self.ident(&ident); + state = Word; + } + Some(TokenTree::Punct(punct)) => { + let ch = punct.as_char(); + if let (Word, '=') = (state, ch) { + self.nbsp(); + } + if ch == ',' && tokens.peek().is_none() { + self.trailing_comma(true); + state = TrailingComma; + } else { + self.token_punct(ch); + if ch == '=' { + self.nbsp(); + } else if ch == ',' { + space(self); + } + state = Punct; + } + } + Some(TokenTree::Literal(literal)) => { + if let Word = state { + space(self); + } + self.token_literal(&literal); + state = Word; + } + Some(TokenTree::Group(group)) => { + let delimiter = group.delimiter(); + let stream = group.stream(); + 
match delimiter { + Delimiter::Parenthesis => { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + state = Punct; + } + Delimiter::Brace => { + self.word("{"); + state = Punct; + } + Delimiter::Bracket => { + self.word("["); + state = Punct; + } + Delimiter::None => {} + } + stack.push((stream.into_iter().peekable(), delimiter)); + space = Self::space; + } + None => { + match delimiter { + Delimiter::Parenthesis => { + if state != TrailingComma { + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + state = Punct; + } + Delimiter::Brace => { + self.word("}"); + state = Punct; + } + Delimiter::Bracket => { + self.word("]"); + state = Punct; + } + Delimiter::None => {} + } + stack.pop(); + if stack.is_empty() { + space = Self::nbsp; + } + } + } + } + } +} + +fn value_of_attribute(requested: &str, attr: &Attribute) -> Option { + let value = match &attr.meta { + Meta::NameValue(meta) if meta.path.is_ident(requested) => &meta.value, + _ => return None, + }; + let lit = match value { + Expr::Lit(expr) if expr.attrs.is_empty() => &expr.lit, + _ => return None, + }; + match lit { + Lit::Str(string) => Some(string.value()), + _ => None, + } +} + +pub fn has_outer(attrs: &[Attribute]) -> bool { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + return true; + } + } + false +} + +pub fn has_inner(attrs: &[Attribute]) -> bool { + for attr in attrs { + if let AttrStyle::Inner(_) = attr.style { + return true; + } + } + false +} + +fn trim_trailing_spaces(doc: &mut String) { + doc.truncate(doc.trim_end_matches(' ').len()); +} + +fn trim_interior_trailing_spaces(doc: &mut String) { + if !doc.contains(" \n") { + return; + } + let mut trimmed = String::with_capacity(doc.len()); + let mut lines = doc.split('\n').peekable(); + while let Some(line) = lines.next() { + if lines.peek().is_some() { + trimmed.push_str(line.trim_end_matches(' ')); + trimmed.push('\n'); + } else { + trimmed.push_str(line); + } + } + *doc = trimmed; +} + +fn 
can_be_block_comment(value: &str) -> bool { + let mut depth = 0usize; + let bytes = value.as_bytes(); + let mut i = 0usize; + let upper = bytes.len() - 1; + + while i < upper { + if bytes[i] == b'/' && bytes[i + 1] == b'*' { + depth += 1; + i += 2; + } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { + if depth == 0 { + return false; + } + depth -= 1; + i += 2; + } else { + i += 1; + } + } + + depth == 0 +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/convenience.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/convenience.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/convenience.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/convenience.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,98 @@ +use crate::algorithm::{self, BeginToken, BreakToken, Breaks, Printer}; +use std::borrow::Cow; + +impl Printer { + pub fn ibox(&mut self, indent: isize) { + self.scan_begin(BeginToken { + offset: indent, + breaks: Breaks::Inconsistent, + }); + } + + pub fn cbox(&mut self, indent: isize) { + self.scan_begin(BeginToken { + offset: indent, + breaks: Breaks::Consistent, + }); + } + + pub fn end(&mut self) { + self.scan_end(); + } + + pub fn word>>(&mut self, wrd: S) { + let s = wrd.into(); + self.scan_string(s); + } + + fn spaces(&mut self, n: usize) { + self.scan_break(BreakToken { + blank_space: n, + ..BreakToken::default() + }); + } + + pub fn zerobreak(&mut self) { + self.spaces(0); + } + + pub fn space(&mut self) { + self.spaces(1); + } + + pub fn nbsp(&mut self) { + self.word(" "); + } + + pub fn hardbreak(&mut self) { + self.spaces(algorithm::SIZE_INFINITY as usize); + } + + pub fn space_if_nonempty(&mut self) { + self.scan_break(BreakToken { + blank_space: 1, + if_nonempty: true, + ..BreakToken::default() + }); + } + + pub fn hardbreak_if_nonempty(&mut self) { + self.scan_break(BreakToken { + blank_space: algorithm::SIZE_INFINITY as usize, + 
if_nonempty: true, + ..BreakToken::default() + }); + } + + pub fn trailing_comma(&mut self, is_last: bool) { + if is_last { + self.scan_break(BreakToken { + pre_break: Some(','), + ..BreakToken::default() + }); + } else { + self.word(","); + self.space(); + } + } + + pub fn trailing_comma_or_space(&mut self, is_last: bool) { + if is_last { + self.scan_break(BreakToken { + blank_space: 1, + pre_break: Some(','), + ..BreakToken::default() + }); + } else { + self.word(","); + self.space(); + } + } + + pub fn neverbreak(&mut self) { + self.scan_break(BreakToken { + never_break: true, + ..BreakToken::default() + }); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/data.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/data.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/data.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/data.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,78 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use syn::{Field, Fields, FieldsUnnamed, Variant, VisRestricted, Visibility}; + +impl Printer { + pub fn variant(&mut self, variant: &Variant) { + self.outer_attrs(&variant.attrs); + self.ident(&variant.ident); + match &variant.fields { + Fields::Named(fields) => { + self.nbsp(); + self.word("{"); + self.cbox(INDENT); + self.space(); + for field in fields.named.iter().delimited() { + self.field(&field); + self.trailing_comma_or_space(field.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + Fields::Unnamed(fields) => { + self.cbox(INDENT); + self.fields_unnamed(fields); + self.end(); + } + Fields::Unit => {} + } + if let Some((_eq_token, discriminant)) = &variant.discriminant { + self.word(" = "); + self.expr(discriminant); + } + } + + pub fn fields_unnamed(&mut self, fields: &FieldsUnnamed) { + self.word("("); + 
self.zerobreak(); + for field in fields.unnamed.iter().delimited() { + self.field(&field); + self.trailing_comma(field.is_last); + } + self.offset(-INDENT); + self.word(")"); + } + + pub fn field(&mut self, field: &Field) { + self.outer_attrs(&field.attrs); + self.visibility(&field.vis); + if let Some(ident) = &field.ident { + self.ident(ident); + self.word(": "); + } + self.ty(&field.ty); + } + + pub fn visibility(&mut self, vis: &Visibility) { + match vis { + Visibility::Public(_) => self.word("pub "), + Visibility::Restricted(vis) => self.vis_restricted(vis), + Visibility::Inherited => {} + } + } + + fn vis_restricted(&mut self, vis: &VisRestricted) { + self.word("pub("); + let omit_in = vis.path.get_ident().map_or(false, |ident| { + matches!(ident.to_string().as_str(), "self" | "super" | "crate") + }); + if !omit_in { + self.word("in "); + } + self.path(&vis.path, PathKind::Simple); + self.word(") "); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/expr.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/expr.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/expr.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/expr.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,1160 @@ +use crate::algorithm::{BreakToken, Printer}; +use crate::attr; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::stmt; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::punctuated::Punctuated; +use syn::{ + token, Arm, Attribute, BinOp, Block, Expr, ExprArray, ExprAssign, ExprAsync, ExprAwait, + ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, ExprClosure, ExprConst, ExprContinue, + ExprField, ExprForLoop, ExprGroup, ExprIf, ExprIndex, ExprInfer, ExprLet, ExprLit, ExprLoop, + ExprMacro, ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRange, ExprReference, + ExprRepeat, ExprReturn, ExprStruct, ExprTry, ExprTryBlock, 
ExprTuple, ExprUnary, ExprUnsafe, + ExprWhile, ExprYield, FieldValue, Index, Label, Member, RangeLimits, ReturnType, Stmt, Token, + UnOp, +}; + +impl Printer { + pub fn expr(&mut self, expr: &Expr) { + match expr { + Expr::Array(expr) => self.expr_array(expr), + Expr::Assign(expr) => self.expr_assign(expr), + Expr::Async(expr) => self.expr_async(expr), + Expr::Await(expr) => self.expr_await(expr, false), + Expr::Binary(expr) => self.expr_binary(expr), + Expr::Block(expr) => self.expr_block(expr), + Expr::Break(expr) => self.expr_break(expr), + Expr::Call(expr) => self.expr_call(expr, false), + Expr::Cast(expr) => self.expr_cast(expr), + Expr::Closure(expr) => self.expr_closure(expr), + Expr::Const(expr) => self.expr_const(expr), + Expr::Continue(expr) => self.expr_continue(expr), + Expr::Field(expr) => self.expr_field(expr, false), + Expr::ForLoop(expr) => self.expr_for_loop(expr), + Expr::Group(expr) => self.expr_group(expr), + Expr::If(expr) => self.expr_if(expr), + Expr::Index(expr) => self.expr_index(expr, false), + Expr::Infer(expr) => self.expr_infer(expr), + Expr::Let(expr) => self.expr_let(expr), + Expr::Lit(expr) => self.expr_lit(expr), + Expr::Loop(expr) => self.expr_loop(expr), + Expr::Macro(expr) => self.expr_macro(expr), + Expr::Match(expr) => self.expr_match(expr), + Expr::MethodCall(expr) => self.expr_method_call(expr, false), + Expr::Paren(expr) => self.expr_paren(expr), + Expr::Path(expr) => self.expr_path(expr), + Expr::Range(expr) => self.expr_range(expr), + Expr::Reference(expr) => self.expr_reference(expr), + Expr::Repeat(expr) => self.expr_repeat(expr), + Expr::Return(expr) => self.expr_return(expr), + Expr::Struct(expr) => self.expr_struct(expr), + Expr::Try(expr) => self.expr_try(expr, false), + Expr::TryBlock(expr) => self.expr_try_block(expr), + Expr::Tuple(expr) => self.expr_tuple(expr), + Expr::Unary(expr) => self.expr_unary(expr), + Expr::Unsafe(expr) => self.expr_unsafe(expr), + Expr::Verbatim(expr) => self.expr_verbatim(expr), + 
Expr::While(expr) => self.expr_while(expr), + Expr::Yield(expr) => self.expr_yield(expr), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown Expr"), + } + } + + pub fn expr_beginning_of_line(&mut self, expr: &Expr, beginning_of_line: bool) { + match expr { + Expr::Await(expr) => self.expr_await(expr, beginning_of_line), + Expr::Field(expr) => self.expr_field(expr, beginning_of_line), + Expr::Index(expr) => self.expr_index(expr, beginning_of_line), + Expr::MethodCall(expr) => self.expr_method_call(expr, beginning_of_line), + Expr::Try(expr) => self.expr_try(expr, beginning_of_line), + _ => self.expr(expr), + } + } + + fn subexpr(&mut self, expr: &Expr, beginning_of_line: bool) { + match expr { + Expr::Await(expr) => self.subexpr_await(expr, beginning_of_line), + Expr::Call(expr) => self.subexpr_call(expr), + Expr::Field(expr) => self.subexpr_field(expr, beginning_of_line), + Expr::Index(expr) => self.subexpr_index(expr, beginning_of_line), + Expr::MethodCall(expr) => self.subexpr_method_call(expr, beginning_of_line, false), + Expr::Try(expr) => self.subexpr_try(expr, beginning_of_line), + _ => { + self.cbox(-INDENT); + self.expr(expr); + self.end(); + } + } + } + + fn wrap_exterior_struct(&mut self, expr: &Expr) { + let needs_paren = contains_exterior_struct_lit(expr); + if needs_paren { + self.word("("); + } + self.cbox(0); + self.expr(expr); + if needs_paren { + self.word(")"); + } + if needs_newline_if_wrap(expr) { + self.space(); + } else { + self.nbsp(); + } + self.end(); + } + + fn expr_array(&mut self, expr: &ExprArray) { + self.outer_attrs(&expr.attrs); + self.word("["); + self.cbox(INDENT); + self.zerobreak(); + for element in expr.elems.iter().delimited() { + self.expr(&element); + self.trailing_comma(element.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("]"); + } + + fn expr_assign(&mut self, expr: &ExprAssign) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + 
self.expr(&expr.left); + self.word(" = "); + self.expr(&expr.right); + self.end(); + } + + fn expr_async(&mut self, expr: &ExprAsync) { + self.outer_attrs(&expr.attrs); + self.word("async "); + if expr.capture.is_some() { + self.word("move "); + } + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_await(&mut self, expr: &ExprAwait, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.subexpr_await(expr, beginning_of_line); + self.end(); + } + + fn subexpr_await(&mut self, expr: &ExprAwait, beginning_of_line: bool) { + self.subexpr(&expr.base, beginning_of_line); + self.zerobreak_unless_short_ident(beginning_of_line, &expr.base); + self.word(".await"); + } + + fn expr_binary(&mut self, expr: &ExprBinary) { + self.outer_attrs(&expr.attrs); + self.ibox(INDENT); + self.ibox(-INDENT); + self.expr(&expr.left); + self.end(); + self.space(); + self.binary_operator(&expr.op); + self.nbsp(); + self.expr(&expr.right); + self.end(); + } + + pub fn expr_block(&mut self, expr: &ExprBlock) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_break(&mut self, expr: &ExprBreak) { + self.outer_attrs(&expr.attrs); + self.word("break"); + if let Some(lifetime) = &expr.label { + self.nbsp(); + self.lifetime(lifetime); + } + if let Some(value) = &expr.expr { + self.nbsp(); + self.expr(value); + } + } + + fn expr_call(&mut self, expr: &ExprCall, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line(&expr.func, beginning_of_line); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + } + + fn subexpr_call(&mut self, expr: &ExprCall) { + self.subexpr(&expr.func, false); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + } + + fn expr_cast(&mut self, expr: &ExprCast) { + self.outer_attrs(&expr.attrs); + 
self.ibox(INDENT); + self.ibox(-INDENT); + self.expr(&expr.expr); + self.end(); + self.space(); + self.word("as "); + self.ty(&expr.ty); + self.end(); + } + + fn expr_closure(&mut self, expr: &ExprClosure) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + if let Some(bound_lifetimes) = &expr.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + if expr.constness.is_some() { + self.word("const "); + } + if expr.movability.is_some() { + self.word("static "); + } + if expr.asyncness.is_some() { + self.word("async "); + } + if expr.capture.is_some() { + self.word("move "); + } + self.cbox(INDENT); + self.word("|"); + for pat in expr.inputs.iter().delimited() { + if pat.is_first { + self.zerobreak(); + } + self.pat(&pat); + if !pat.is_last { + self.word(","); + self.space(); + } + } + match &expr.output { + ReturnType::Default => { + self.word("|"); + self.space(); + self.offset(-INDENT); + self.end(); + self.neverbreak(); + let wrap_in_brace = match &*expr.body { + Expr::Match(ExprMatch { attrs, .. }) | Expr::Call(ExprCall { attrs, .. 
}) => { + attr::has_outer(attrs) + } + body => !is_blocklike(body), + }; + if wrap_in_brace { + self.cbox(INDENT); + self.scan_break(BreakToken { + pre_break: Some('{'), + ..BreakToken::default() + }); + self.expr(&expr.body); + self.scan_break(BreakToken { + offset: -INDENT, + pre_break: stmt::add_semi(&expr.body).then(|| ';'), + post_break: Some('}'), + ..BreakToken::default() + }); + self.end(); + } else { + self.expr(&expr.body); + } + } + ReturnType::Type(_arrow, ty) => { + if !expr.inputs.is_empty() { + self.trailing_comma(true); + self.offset(-INDENT); + } + self.word("|"); + self.end(); + self.word(" -> "); + self.ty(ty); + self.nbsp(); + self.neverbreak(); + self.expr(&expr.body); + } + } + self.end(); + } + + pub fn expr_const(&mut self, expr: &ExprConst) { + self.outer_attrs(&expr.attrs); + self.word("const "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_continue(&mut self, expr: &ExprContinue) { + self.outer_attrs(&expr.attrs); + self.word("continue"); + if let Some(lifetime) = &expr.label { + self.nbsp(); + self.lifetime(lifetime); + } + } + + fn expr_field(&mut self, expr: &ExprField, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.subexpr_field(expr, beginning_of_line); + self.end(); + } + + fn subexpr_field(&mut self, expr: &ExprField, beginning_of_line: bool) { + self.subexpr(&expr.base, beginning_of_line); + self.zerobreak_unless_short_ident(beginning_of_line, &expr.base); + self.word("."); + self.member(&expr.member); + } + + fn expr_for_loop(&mut self, expr: &ExprForLoop) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("for "); + self.pat(&expr.pat); + self.word(" in "); + self.neverbreak(); + self.wrap_exterior_struct(&expr.expr); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in 
&expr.body.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.end(); + } + + fn expr_group(&mut self, expr: &ExprGroup) { + self.outer_attrs(&expr.attrs); + self.expr(&expr.expr); + } + + fn expr_if(&mut self, expr: &ExprIf) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.word("if "); + self.cbox(-INDENT); + self.wrap_exterior_struct(&expr.cond); + self.end(); + if let Some((_else_token, else_branch)) = &expr.else_branch { + let mut else_branch = &**else_branch; + self.small_block(&expr.then_branch, &[]); + loop { + self.word(" else "); + match else_branch { + Expr::If(expr) => { + self.word("if "); + self.cbox(-INDENT); + self.wrap_exterior_struct(&expr.cond); + self.end(); + self.small_block(&expr.then_branch, &[]); + if let Some((_else_token, next)) = &expr.else_branch { + else_branch = next; + continue; + } + } + Expr::Block(expr) => { + self.small_block(&expr.block, &[]); + } + // If not one of the valid expressions to exist in an else + // clause, wrap in a block. 
+ other => { + self.word("{"); + self.space(); + self.ibox(INDENT); + self.expr(other); + self.end(); + self.space(); + self.offset(-INDENT); + self.word("}"); + } + } + break; + } + } else if expr.then_branch.stmts.is_empty() { + self.word("{}"); + } else { + self.word("{"); + self.hardbreak(); + for stmt in &expr.then_branch.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.word("}"); + } + self.end(); + } + + fn expr_index(&mut self, expr: &ExprIndex, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line(&expr.expr, beginning_of_line); + self.word("["); + self.expr(&expr.index); + self.word("]"); + } + + fn subexpr_index(&mut self, expr: &ExprIndex, beginning_of_line: bool) { + self.subexpr(&expr.expr, beginning_of_line); + self.word("["); + self.expr(&expr.index); + self.word("]"); + } + + fn expr_infer(&mut self, expr: &ExprInfer) { + self.outer_attrs(&expr.attrs); + self.word("_"); + } + + fn expr_let(&mut self, expr: &ExprLet) { + self.outer_attrs(&expr.attrs); + self.ibox(INDENT); + self.word("let "); + self.ibox(-INDENT); + self.pat(&expr.pat); + self.end(); + self.space(); + self.word("= "); + let needs_paren = contains_exterior_struct_lit(&expr.expr); + if needs_paren { + self.word("("); + } + self.expr(&expr.expr); + if needs_paren { + self.word(")"); + } + self.end(); + } + + pub fn expr_lit(&mut self, expr: &ExprLit) { + self.outer_attrs(&expr.attrs); + self.lit(&expr.lit); + } + + fn expr_loop(&mut self, expr: &ExprLoop) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("loop {"); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in &expr.body.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + pub fn expr_macro(&mut self, expr: &ExprMacro) { + self.outer_attrs(&expr.attrs); + self.mac(&expr.mac, None); + } + + fn expr_match(&mut self, expr: &ExprMatch) { + 
self.outer_attrs(&expr.attrs); + self.ibox(0); + self.word("match "); + self.wrap_exterior_struct(&expr.expr); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for arm in &expr.arms { + self.arm(arm); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.end(); + } + + fn expr_method_call(&mut self, expr: &ExprMethodCall, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + let unindent_call_args = beginning_of_line && is_short_ident(&expr.receiver); + self.subexpr_method_call(expr, beginning_of_line, unindent_call_args); + self.end(); + } + + fn subexpr_method_call( + &mut self, + expr: &ExprMethodCall, + beginning_of_line: bool, + unindent_call_args: bool, + ) { + self.subexpr(&expr.receiver, beginning_of_line); + self.zerobreak_unless_short_ident(beginning_of_line, &expr.receiver); + self.word("."); + self.ident(&expr.method); + if let Some(turbofish) = &expr.turbofish { + self.angle_bracketed_generic_arguments(turbofish, PathKind::Expr); + } + self.cbox(if unindent_call_args { -INDENT } else { 0 }); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + self.end(); + } + + fn expr_paren(&mut self, expr: &ExprParen) { + self.outer_attrs(&expr.attrs); + self.word("("); + self.expr(&expr.expr); + self.word(")"); + } + + pub fn expr_path(&mut self, expr: &ExprPath) { + self.outer_attrs(&expr.attrs); + self.qpath(&expr.qself, &expr.path, PathKind::Expr); + } + + pub fn expr_range(&mut self, expr: &ExprRange) { + self.outer_attrs(&expr.attrs); + if let Some(start) = &expr.start { + self.expr(start); + } + self.word(match expr.limits { + RangeLimits::HalfOpen(_) => "..", + RangeLimits::Closed(_) => "..=", + }); + if let Some(end) = &expr.end { + self.expr(end); + } + } + + fn expr_reference(&mut self, expr: &ExprReference) { + self.outer_attrs(&expr.attrs); + self.word("&"); + if expr.mutability.is_some() { + 
self.word("mut "); + } + self.expr(&expr.expr); + } + + fn expr_repeat(&mut self, expr: &ExprRepeat) { + self.outer_attrs(&expr.attrs); + self.word("["); + self.expr(&expr.expr); + self.word("; "); + self.expr(&expr.len); + self.word("]"); + } + + fn expr_return(&mut self, expr: &ExprReturn) { + self.outer_attrs(&expr.attrs); + self.word("return"); + if let Some(value) = &expr.expr { + self.nbsp(); + self.expr(value); + } + } + + fn expr_struct(&mut self, expr: &ExprStruct) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.ibox(-INDENT); + self.qpath(&expr.qself, &expr.path, PathKind::Expr); + self.end(); + self.word(" {"); + self.space_if_nonempty(); + for field_value in expr.fields.iter().delimited() { + self.field_value(&field_value); + self.trailing_comma_or_space(field_value.is_last && expr.rest.is_none()); + } + if let Some(rest) = &expr.rest { + self.word(".."); + self.expr(rest); + self.space(); + } + self.offset(-INDENT); + self.end_with_max_width(34); + self.word("}"); + } + + fn expr_try(&mut self, expr: &ExprTry, beginning_of_line: bool) { + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line(&expr.expr, beginning_of_line); + self.word("?"); + } + + fn subexpr_try(&mut self, expr: &ExprTry, beginning_of_line: bool) { + self.subexpr(&expr.expr, beginning_of_line); + self.word("?"); + } + + fn expr_try_block(&mut self, expr: &ExprTryBlock) { + self.outer_attrs(&expr.attrs); + self.word("try "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_tuple(&mut self, expr: &ExprTuple) { + self.outer_attrs(&expr.attrs); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in expr.elems.iter().delimited() { + self.expr(&elem); + if expr.elems.len() == 1 { + self.word(","); + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + fn expr_unary(&mut self, expr: &ExprUnary) { + 
self.outer_attrs(&expr.attrs); + self.unary_operator(&expr.op); + self.expr(&expr.expr); + } + + fn expr_unsafe(&mut self, expr: &ExprUnsafe) { + self.outer_attrs(&expr.attrs); + self.word("unsafe "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + #[cfg(not(feature = "verbatim"))] + fn expr_verbatim(&mut self, expr: &TokenStream) { + if !expr.is_empty() { + unimplemented!("Expr::Verbatim `{}`", expr); + } + } + + #[cfg(feature = "verbatim")] + fn expr_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + + enum ExprVerbatim { + Empty, + RawReference(RawReference), + } + + struct RawReference { + mutable: bool, + expr: Expr, + } + + mod kw { + syn::custom_keyword!(raw); + } + + impl Parse for ExprVerbatim { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + if input.is_empty() { + Ok(ExprVerbatim::Empty) + } else if lookahead.peek(Token![&]) { + input.parse::()?; + input.parse::()?; + let mutable = input.parse::>()?.is_some(); + if !mutable { + input.parse::()?; + } + let expr: Expr = input.parse()?; + Ok(ExprVerbatim::RawReference(RawReference { mutable, expr })) + } else { + Err(lookahead.error()) + } + } + } + + let expr: ExprVerbatim = match syn::parse2(tokens.clone()) { + Ok(expr) => expr, + Err(_) => unimplemented!("Expr::Verbatim `{}`", tokens), + }; + + match expr { + ExprVerbatim::Empty => {} + ExprVerbatim::RawReference(expr) => { + self.word("&raw "); + self.word(if expr.mutable { "mut " } else { "const " }); + self.expr(&expr.expr); + } + } + } + + fn expr_while(&mut self, expr: &ExprWhile) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("while "); + self.wrap_exterior_struct(&expr.cond); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in &expr.body.stmts { + self.stmt(stmt); + } + 
self.offset(-INDENT); + self.end(); + self.word("}"); + } + + fn expr_yield(&mut self, expr: &ExprYield) { + self.outer_attrs(&expr.attrs); + self.word("yield"); + if let Some(value) = &expr.expr { + self.nbsp(); + self.expr(value); + } + } + + fn label(&mut self, label: &Label) { + self.lifetime(&label.name); + self.word(": "); + } + + fn field_value(&mut self, field_value: &FieldValue) { + self.outer_attrs(&field_value.attrs); + self.member(&field_value.member); + if field_value.colon_token.is_some() { + self.word(": "); + self.ibox(0); + self.expr(&field_value.expr); + self.end(); + } + } + + fn arm(&mut self, arm: &Arm) { + self.outer_attrs(&arm.attrs); + self.ibox(0); + self.pat(&arm.pat); + if let Some((_if_token, guard)) = &arm.guard { + self.word(" if "); + self.expr(guard); + } + self.word(" =>"); + let empty_block; + let mut body = &*arm.body; + while let Expr::Block(expr) = body { + if expr.attrs.is_empty() && expr.label.is_none() { + let mut stmts = expr.block.stmts.iter(); + if let (Some(Stmt::Expr(inner, None)), None) = (stmts.next(), stmts.next()) { + body = inner; + continue; + } + } + break; + } + if let Expr::Tuple(expr) = body { + if expr.elems.is_empty() && expr.attrs.is_empty() { + empty_block = Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace::default(), + stmts: Vec::new(), + }, + }); + body = &empty_block; + } + } + if let Expr::Block(body) = body { + self.nbsp(); + if let Some(label) = &body.label { + self.label(label); + } + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&body.attrs); + for stmt in &body.block.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.end(); + } else { + self.nbsp(); + self.neverbreak(); + self.cbox(INDENT); + self.scan_break(BreakToken { + pre_break: Some('{'), + ..BreakToken::default() + }); + self.expr_beginning_of_line(body, true); + 
self.scan_break(BreakToken { + offset: -INDENT, + pre_break: stmt::add_semi(body).then(|| ';'), + post_break: Some('}'), + no_break: requires_terminator(body).then(|| ','), + ..BreakToken::default() + }); + self.end(); + self.end(); + } + } + + fn call_args(&mut self, args: &Punctuated) { + let mut iter = args.iter(); + match (iter.next(), iter.next()) { + (Some(expr), None) if is_blocklike(expr) => { + self.expr(expr); + } + _ => { + self.cbox(INDENT); + self.zerobreak(); + for arg in args.iter().delimited() { + self.expr(&arg); + self.trailing_comma(arg.is_last); + } + self.offset(-INDENT); + self.end(); + } + } + } + + pub fn small_block(&mut self, block: &Block, attrs: &[Attribute]) { + self.word("{"); + if attr::has_inner(attrs) || !block.stmts.is_empty() { + self.space(); + self.inner_attrs(attrs); + match (block.stmts.get(0), block.stmts.get(1)) { + (Some(Stmt::Expr(expr, None)), None) if stmt::break_after(expr) => { + self.ibox(0); + self.expr_beginning_of_line(expr, true); + self.end(); + self.space(); + } + _ => { + for stmt in &block.stmts { + self.stmt(stmt); + } + } + } + self.offset(-INDENT); + } + self.word("}"); + } + + pub fn member(&mut self, member: &Member) { + match member { + Member::Named(ident) => self.ident(ident), + Member::Unnamed(index) => self.index(index), + } + } + + fn index(&mut self, member: &Index) { + self.word(member.index.to_string()); + } + + fn binary_operator(&mut self, op: &BinOp) { + self.word(match op { + BinOp::Add(_) => "+", + BinOp::Sub(_) => "-", + BinOp::Mul(_) => "*", + BinOp::Div(_) => "/", + BinOp::Rem(_) => "%", + BinOp::And(_) => "&&", + BinOp::Or(_) => "||", + BinOp::BitXor(_) => "^", + BinOp::BitAnd(_) => "&", + BinOp::BitOr(_) => "|", + BinOp::Shl(_) => "<<", + BinOp::Shr(_) => ">>", + BinOp::Eq(_) => "==", + BinOp::Lt(_) => "<", + BinOp::Le(_) => "<=", + BinOp::Ne(_) => "!=", + BinOp::Ge(_) => ">=", + BinOp::Gt(_) => ">", + BinOp::AddAssign(_) => "+=", + BinOp::SubAssign(_) => "-=", + BinOp::MulAssign(_) => 
"*=", + BinOp::DivAssign(_) => "/=", + BinOp::RemAssign(_) => "%=", + BinOp::BitXorAssign(_) => "^=", + BinOp::BitAndAssign(_) => "&=", + BinOp::BitOrAssign(_) => "|=", + BinOp::ShlAssign(_) => "<<=", + BinOp::ShrAssign(_) => ">>=", + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown BinOp"), + }); + } + + fn unary_operator(&mut self, op: &UnOp) { + self.word(match op { + UnOp::Deref(_) => "*", + UnOp::Not(_) => "!", + UnOp::Neg(_) => "-", + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown UnOp"), + }); + } + + fn zerobreak_unless_short_ident(&mut self, beginning_of_line: bool, expr: &Expr) { + if beginning_of_line && is_short_ident(expr) { + return; + } + self.zerobreak(); + } +} + +pub fn requires_terminator(expr: &Expr) -> bool { + // see https://github.com/rust-lang/rust/blob/a266f1199/compiler/rustc_ast/src/util/classify.rs#L7-L26 + match expr { + Expr::If(_) + | Expr::Match(_) + | Expr::Block(_) | Expr::Unsafe(_) // both under ExprKind::Block in rustc + | Expr::While(_) + | Expr::Loop(_) + | Expr::ForLoop(_) + | Expr::TryBlock(_) + | Expr::Const(_) => false, + + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::Group(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Macro(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Verbatim(_) + | Expr::Yield(_) => true, + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => true, + } +} + +// Expressions that syntactically contain an "exterior" struct literal i.e. 
not +// surrounded by any parens or other delimiters. For example `X { y: 1 }`, `X { +// y: 1 }.method()`, `foo == X { y: 1 }` and `X { y: 1 } == foo` all do, but `(X +// { y: 1 }) == foo` does not. +fn contains_exterior_struct_lit(expr: &Expr) -> bool { + match expr { + Expr::Struct(_) => true, + + Expr::Assign(ExprAssign { left, right, .. }) + | Expr::Binary(ExprBinary { left, right, .. }) => { + // X { y: 1 } + X { y: 2 } + contains_exterior_struct_lit(left) || contains_exterior_struct_lit(right) + } + + Expr::Await(ExprAwait { base: e, .. }) + | Expr::Cast(ExprCast { expr: e, .. }) + | Expr::Field(ExprField { base: e, .. }) + | Expr::Index(ExprIndex { expr: e, .. }) + | Expr::MethodCall(ExprMethodCall { receiver: e, .. }) + | Expr::Reference(ExprReference { expr: e, .. }) + | Expr::Unary(ExprUnary { expr: e, .. }) => { + // &X { y: 1 }, X { y: 1 }.y + contains_exterior_struct_lit(e) + } + + Expr::Array(_) + | Expr::Async(_) + | Expr::Block(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::ForLoop(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(_) => false, + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => false, + } +} + +fn needs_newline_if_wrap(expr: &Expr) -> bool { + match expr { + Expr::Array(_) + | Expr::Async(_) + | Expr::Block(_) + | Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Path(_) + | Expr::Range(ExprRange { end: None, .. 
}) + | Expr::Repeat(_) + | Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(ExprYield { expr: None, .. }) => false, + + Expr::Assign(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Cast(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::MethodCall(_) => true, + + Expr::Break(ExprBreak { expr: Some(e), .. }) + | Expr::Call(ExprCall { func: e, .. }) + | Expr::Group(ExprGroup { expr: e, .. }) + | Expr::Let(ExprLet { expr: e, .. }) + | Expr::Paren(ExprParen { expr: e, .. }) + | Expr::Range(ExprRange { end: Some(e), .. }) + | Expr::Reference(ExprReference { expr: e, .. }) + | Expr::Return(ExprReturn { expr: Some(e), .. }) + | Expr::Try(ExprTry { expr: e, .. }) + | Expr::Unary(ExprUnary { expr: e, .. }) + | Expr::Yield(ExprYield { expr: Some(e), .. }) => needs_newline_if_wrap(e), + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => false, + } +} + +fn is_short_ident(expr: &Expr) -> bool { + if let Expr::Path(expr) = expr { + return expr.attrs.is_empty() + && expr.qself.is_none() + && expr + .path + .get_ident() + .map_or(false, |ident| ident.to_string().len() as isize <= INDENT); + } + false +} + +fn is_blocklike(expr: &Expr) -> bool { + match expr { + Expr::Array(ExprArray { attrs, .. }) + | Expr::Async(ExprAsync { attrs, .. }) + | Expr::Block(ExprBlock { attrs, .. }) + | Expr::Closure(ExprClosure { attrs, .. }) + | Expr::Const(ExprConst { attrs, .. }) + | Expr::Struct(ExprStruct { attrs, .. }) + | Expr::TryBlock(ExprTryBlock { attrs, .. }) + | Expr::Tuple(ExprTuple { attrs, .. }) + | Expr::Unsafe(ExprUnsafe { attrs, .. 
}) => !attr::has_outer(attrs), + + Expr::Assign(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Try(_) + | Expr::Unary(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(_) => false, + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => false, + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/file.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/file.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/file.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/file.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,17 @@ +use crate::algorithm::Printer; +use syn::File; + +impl Printer { + pub fn file(&mut self, file: &File) { + self.cbox(0); + if let Some(shebang) = &file.shebang { + self.word(shebang.clone()); + self.hardbreak(); + } + self.inner_attrs(&file.attrs); + for item in &file.items { + self.item(item); + } + self.end(); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/generics.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/generics.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/generics.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/generics.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,325 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use 
std::ptr; +use syn::{ + BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeParam, PredicateLifetime, + PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, WhereClause, + WherePredicate, +}; + +impl Printer { + pub fn generics(&mut self, generics: &Generics) { + if generics.params.is_empty() { + return; + } + + self.word("<"); + self.cbox(0); + self.zerobreak(); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. + #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + First, + Second, + } + fn group(param: &GenericParam) -> Group { + match param { + GenericParam::Lifetime(_) => Group::First, + GenericParam::Type(_) | GenericParam::Const(_) => Group::Second, + } + } + let last = generics.params.iter().max_by_key(|param| group(param)); + for current_group in [Group::First, Group::Second] { + for param in &generics.params { + if group(param) == current_group { + self.generic_param(param); + self.trailing_comma(ptr::eq(param, last.unwrap())); + } + } + } + + self.offset(-INDENT); + self.end(); + self.word(">"); + } + + fn generic_param(&mut self, generic_param: &GenericParam) { + match generic_param { + GenericParam::Type(type_param) => self.type_param(type_param), + GenericParam::Lifetime(lifetime_param) => self.lifetime_param(lifetime_param), + GenericParam::Const(const_param) => self.const_param(const_param), + } + } + + pub fn bound_lifetimes(&mut self, bound_lifetimes: &BoundLifetimes) { + self.word("for<"); + for param in bound_lifetimes.lifetimes.iter().delimited() { + self.generic_param(¶m); + if !param.is_last { + self.word(", "); + } + } + self.word("> "); + } + + fn lifetime_param(&mut self, lifetime_param: &LifetimeParam) { + self.outer_attrs(&lifetime_param.attrs); + self.lifetime(&lifetime_param.lifetime); + for lifetime in lifetime_param.bounds.iter().delimited() { + if lifetime.is_first { + self.word(": "); + } else { + self.word(" + "); + } + self.lifetime(&lifetime); + } + } 
+ + fn type_param(&mut self, type_param: &TypeParam) { + self.outer_attrs(&type_param.attrs); + self.ident(&type_param.ident); + self.ibox(INDENT); + for type_param_bound in type_param.bounds.iter().delimited() { + if type_param_bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&type_param_bound); + } + if let Some(default) = &type_param.default { + self.space(); + self.word("= "); + self.ty(default); + } + self.end(); + } + + pub fn type_param_bound(&mut self, type_param_bound: &TypeParamBound) { + match type_param_bound { + TypeParamBound::Trait(trait_bound) => { + let tilde_const = false; + self.trait_bound(trait_bound, tilde_const); + } + TypeParamBound::Lifetime(lifetime) => self.lifetime(lifetime), + TypeParamBound::Verbatim(bound) => self.type_param_bound_verbatim(bound), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown TypeParamBound"), + } + } + + fn trait_bound(&mut self, trait_bound: &TraitBound, tilde_const: bool) { + if trait_bound.paren_token.is_some() { + self.word("("); + } + if tilde_const { + self.word("~const "); + } + self.trait_bound_modifier(&trait_bound.modifier); + if let Some(bound_lifetimes) = &trait_bound.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + for segment in trait_bound.path.segments.iter().delimited() { + if !segment.is_first || trait_bound.path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, PathKind::Type); + } + if trait_bound.paren_token.is_some() { + self.word(")"); + } + } + + fn trait_bound_modifier(&mut self, trait_bound_modifier: &TraitBoundModifier) { + match trait_bound_modifier { + TraitBoundModifier::None => {} + TraitBoundModifier::Maybe(_question_mark) => self.word("?"), + } + } + + #[cfg(not(feature = "verbatim"))] + fn type_param_bound_verbatim(&mut self, bound: &TokenStream) { + unimplemented!("TypeParamBound::Verbatim `{}`", bound); + } + + 
#[cfg(feature = "verbatim")] + fn type_param_bound_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{parenthesized, token, Token}; + + enum TypeParamBoundVerbatim { + TildeConst(TraitBound), + } + + impl Parse for TypeParamBoundVerbatim { + fn parse(input: ParseStream) -> Result { + let content; + let (paren_token, content) = if input.peek(token::Paren) { + (Some(parenthesized!(content in input)), &content) + } else { + (None, input) + }; + content.parse::()?; + content.parse::()?; + let mut bound: TraitBound = content.parse()?; + bound.paren_token = paren_token; + Ok(TypeParamBoundVerbatim::TildeConst(bound)) + } + } + + let bound: TypeParamBoundVerbatim = match syn::parse2(tokens.clone()) { + Ok(bound) => bound, + Err(_) => unimplemented!("TypeParamBound::Verbatim `{}`", tokens), + }; + + match bound { + TypeParamBoundVerbatim::TildeConst(trait_bound) => { + let tilde_const = true; + self.trait_bound(&trait_bound, tilde_const); + } + } + } + + fn const_param(&mut self, const_param: &ConstParam) { + self.outer_attrs(&const_param.attrs); + self.word("const "); + self.ident(&const_param.ident); + self.word(": "); + self.ty(&const_param.ty); + if let Some(default) = &const_param.default { + self.word(" = "); + self.expr(default); + } + } + + pub fn where_clause_for_body(&mut self, where_clause: &Option) { + let hardbreaks = true; + let semi = false; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_semi(&mut self, where_clause: &Option) { + let hardbreaks = true; + let semi = true; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_oneline(&mut self, where_clause: &Option) { + let hardbreaks = false; + let semi = false; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_oneline_semi(&mut self, where_clause: &Option) { + let hardbreaks = false; + let semi = true; + self.where_clause_impl(where_clause, 
hardbreaks, semi); + } + + fn where_clause_impl( + &mut self, + where_clause: &Option, + hardbreaks: bool, + semi: bool, + ) { + let where_clause = match where_clause { + Some(where_clause) if !where_clause.predicates.is_empty() => where_clause, + _ => { + if semi { + self.word(";"); + } else { + self.nbsp(); + } + return; + } + }; + if hardbreaks { + self.hardbreak(); + self.offset(-INDENT); + self.word("where"); + self.hardbreak(); + for predicate in where_clause.predicates.iter().delimited() { + self.where_predicate(&predicate); + if predicate.is_last && semi { + self.word(";"); + } else { + self.word(","); + self.hardbreak(); + } + } + if !semi { + self.offset(-INDENT); + } + } else { + self.space(); + self.offset(-INDENT); + self.word("where"); + self.space(); + for predicate in where_clause.predicates.iter().delimited() { + self.where_predicate(&predicate); + if predicate.is_last && semi { + self.word(";"); + } else { + self.trailing_comma_or_space(predicate.is_last); + } + } + if !semi { + self.offset(-INDENT); + } + } + } + + fn where_predicate(&mut self, predicate: &WherePredicate) { + match predicate { + WherePredicate::Type(predicate) => self.predicate_type(predicate), + WherePredicate::Lifetime(predicate) => self.predicate_lifetime(predicate), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown WherePredicate"), + } + } + + fn predicate_type(&mut self, predicate: &PredicateType) { + if let Some(bound_lifetimes) = &predicate.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + self.ty(&predicate.bounded_ty); + self.word(":"); + if predicate.bounds.len() == 1 { + self.ibox(0); + } else { + self.ibox(INDENT); + } + for type_param_bound in predicate.bounds.iter().delimited() { + if type_param_bound.is_first { + self.nbsp(); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&type_param_bound); + } + self.end(); + } + + fn predicate_lifetime(&mut self, predicate: 
&PredicateLifetime) { + self.lifetime(&predicate.lifetime); + self.word(":"); + self.ibox(INDENT); + for lifetime in predicate.bounds.iter().delimited() { + if lifetime.is_first { + self.nbsp(); + } else { + self.space(); + self.word("+ "); + } + self.lifetime(&lifetime); + } + self.end(); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/item.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/item.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/item.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/item.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,1646 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::{ + Fields, FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, + ForeignItemType, ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, Item, + ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, + ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, + Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, + TraitItemType, Type, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, +}; + +impl Printer { + pub fn item(&mut self, item: &Item) { + match item { + Item::Const(item) => self.item_const(item), + Item::Enum(item) => self.item_enum(item), + Item::ExternCrate(item) => self.item_extern_crate(item), + Item::Fn(item) => self.item_fn(item), + Item::ForeignMod(item) => self.item_foreign_mod(item), + Item::Impl(item) => self.item_impl(item), + Item::Macro(item) => self.item_macro(item), + Item::Mod(item) => self.item_mod(item), + Item::Static(item) => self.item_static(item), + Item::Struct(item) => self.item_struct(item), + Item::Trait(item) => self.item_trait(item), + 
Item::TraitAlias(item) => self.item_trait_alias(item), + Item::Type(item) => self.item_type(item), + Item::Union(item) => self.item_union(item), + Item::Use(item) => self.item_use(item), + Item::Verbatim(item) => self.item_verbatim(item), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown Item"), + } + } + + fn item_const(&mut self, item: &ItemConst) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("const "); + self.ident(&item.ident); + self.generics(&item.generics); + self.word(": "); + self.ty(&item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&item.expr); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_enum(&mut self, item: &ItemEnum) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("enum "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for variant in &item.variants { + self.variant(variant); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_extern_crate(&mut self, item: &ItemExternCrate) { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("extern crate "); + self.ident(&item.ident); + if let Some((_as_token, rename)) = &item.rename { + self.word(" as "); + self.ident(rename); + } + self.word(";"); + self.hardbreak(); + } + + fn item_fn(&mut self, item: &ItemFn) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.signature(&item.sig); + self.where_clause_for_body(&item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for stmt in &item.block.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } 
+ + fn item_foreign_mod(&mut self, item: &ItemForeignMod) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.abi(&item.abi); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for foreign_item in &item.items { + self.foreign_item(foreign_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_impl(&mut self, item: &ItemImpl) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.ibox(-INDENT); + self.cbox(INDENT); + if item.defaultness.is_some() { + self.word("default "); + } + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.word("impl"); + self.generics(&item.generics); + self.end(); + self.nbsp(); + if let Some((negative_polarity, path, _for_token)) = &item.trait_ { + if negative_polarity.is_some() { + self.word("!"); + } + self.path(path, PathKind::Type); + self.space(); + self.word("for "); + } + self.ty(&item.self_ty); + self.end(); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for impl_item in &item.items { + self.impl_item(impl_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_macro(&mut self, item: &ItemMacro) { + self.outer_attrs(&item.attrs); + self.mac(&item.mac, item.ident.as_ref()); + self.mac_semi_if_needed(&item.mac.delimiter); + self.hardbreak(); + } + + fn item_mod(&mut self, item: &ItemMod) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.word("mod "); + self.ident(&item.ident); + if let Some((_brace, items)) = &item.content { + self.word(" {"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for item in items { + self.item(item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + 
self.word(";"); + self.end(); + } + self.hardbreak(); + } + + fn item_static(&mut self, item: &ItemStatic) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("static "); + self.static_mutability(&item.mutability); + self.ident(&item.ident); + self.word(": "); + self.ty(&item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&item.expr); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_struct(&mut self, item: &ItemStruct) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("struct "); + self.ident(&item.ident); + self.generics(&item.generics); + match &item.fields { + Fields::Named(fields) => { + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for field in &fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + Fields::Unnamed(fields) => { + self.fields_unnamed(fields); + self.where_clause_semi(&item.generics.where_clause); + self.end(); + } + Fields::Unit => { + self.where_clause_semi(&item.generics.where_clause); + self.end(); + } + } + self.hardbreak(); + } + + fn item_trait(&mut self, item: &ItemTrait) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.unsafety.is_some() { + self.word("unsafe "); + } + if item.auto_token.is_some() { + self.word("auto "); + } + self.word("trait "); + self.ident(&item.ident); + self.generics(&item.generics); + for supertrait in item.supertraits.iter().delimited() { + if supertrait.is_first { + self.word(": "); + } else { + self.word(" + "); + } + self.type_param_bound(&supertrait); + } + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for trait_item in &item.items { + self.trait_item(trait_item); + } + self.offset(-INDENT); + self.end(); 
+ self.word("}"); + self.hardbreak(); + } + + fn item_trait_alias(&mut self, item: &ItemTraitAlias) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("trait "); + self.ident(&item.ident); + self.generics(&item.generics); + self.word(" = "); + self.neverbreak(); + for bound in item.bounds.iter().delimited() { + if !bound.is_first { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + self.where_clause_semi(&item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn item_type(&mut self, item: &ItemType) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("type "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_oneline(&item.generics.where_clause); + self.word("= "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(&item.ty); + self.end(); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_union(&mut self, item: &ItemUnion) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("union "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for field in &item.fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_use(&mut self, item: &ItemUse) { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("use "); + if item.leading_colon.is_some() { + self.word("::"); + } + self.use_tree(&item.tree); + self.word(";"); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn item_verbatim(&mut self, item: &TokenStream) { + if !item.is_empty() { + unimplemented!("Item::Verbatim `{}`", item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn item_verbatim(&mut self, tokens: 
&TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::punctuated::Punctuated; + use syn::{ + braced, parenthesized, token, Attribute, Generics, Ident, Lifetime, Token, Visibility, + }; + use verbatim::{ + FlexibleItemConst, FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, + WhereClauseLocation, + }; + + enum ItemVerbatim { + Empty, + ConstFlexible(FlexibleItemConst), + FnFlexible(FlexibleItemFn), + ImplFlexible(ImplFlexible), + Macro2(Macro2), + StaticFlexible(FlexibleItemStatic), + TypeFlexible(FlexibleItemType), + UseBrace(UseBrace), + } + + struct ImplFlexible { + attrs: Vec, + vis: Visibility, + defaultness: bool, + unsafety: bool, + generics: Generics, + constness: ImplConstness, + negative_impl: bool, + trait_: Option, + self_ty: Type, + items: Vec, + } + + enum ImplConstness { + None, + MaybeConst, + Const, + } + + struct Macro2 { + attrs: Vec, + vis: Visibility, + ident: Ident, + args: Option, + body: TokenStream, + } + + struct UseBrace { + attrs: Vec, + vis: Visibility, + trees: Punctuated, + } + + struct RootUseTree { + leading_colon: Option, + inner: UseTree, + } + + impl Parse for ImplConstness { + fn parse(input: ParseStream) -> Result { + if input.parse::>()?.is_some() { + input.parse::()?; + Ok(ImplConstness::MaybeConst) + } else if input.parse::>()?.is_some() { + Ok(ImplConstness::Const) + } else { + Ok(ImplConstness::None) + } + } + } + + impl Parse for RootUseTree { + fn parse(input: ParseStream) -> Result { + Ok(RootUseTree { + leading_colon: input.parse()?, + inner: input.parse()?, + }) + } + } + + impl Parse for ItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ItemVerbatim::Empty); + } + + let mut attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { + let defaultness = false; + let flexible_item = 
FlexibleItemConst::parse(attrs, vis, defaultness, input)?; + Ok(ItemVerbatim::ConstFlexible(flexible_item)) + } else if input.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) && !input.peek2(Token![impl]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + let defaultness = false; + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![default]) + || input.peek(Token![unsafe]) + || lookahead.peek(Token![impl]) + { + let defaultness = input.parse::>()?.is_some(); + let unsafety = input.parse::>()?.is_some(); + input.parse::()?; + let has_generics = input.peek(Token![<]) + && (input.peek2(Token![>]) + || input.peek2(Token![#]) + || (input.peek2(Ident) || input.peek2(Lifetime)) + && (input.peek3(Token![:]) + || input.peek3(Token![,]) + || input.peek3(Token![>]) + || input.peek3(Token![=])) + || input.peek2(Token![const])); + let mut generics: Generics = if has_generics { + input.parse()? + } else { + Generics::default() + }; + let constness: ImplConstness = input.parse()?; + let negative_impl = + !input.peek2(token::Brace) && input.parse::>()?.is_some(); + let first_ty: Type = input.parse()?; + let (trait_, self_ty) = if input.parse::>()?.is_some() { + (Some(first_ty), input.parse()?) 
+ } else { + (None, first_ty) + }; + generics.where_clause = input.parse()?; + let content; + braced!(content in input); + let inner_attrs = content.call(Attribute::parse_inner)?; + attrs.extend(inner_attrs); + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + Ok(ItemVerbatim::ImplFlexible(ImplFlexible { + attrs, + vis, + defaultness, + unsafety, + generics, + constness, + negative_impl, + trait_, + self_ty, + items, + })) + } else if lookahead.peek(Token![macro]) { + input.parse::()?; + let ident: Ident = input.parse()?; + let args = if input.peek(token::Paren) { + let paren_content; + parenthesized!(paren_content in input); + Some(paren_content.parse::()?) + } else { + None + }; + let brace_content; + braced!(brace_content in input); + let body: TokenStream = brace_content.parse()?; + Ok(ItemVerbatim::Macro2(Macro2 { + attrs, + vis, + ident, + args, + body, + })) + } else if lookahead.peek(Token![static]) { + let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; + Ok(ItemVerbatim::StaticFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let defaultness = false; + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::BeforeEq, + )?; + Ok(ItemVerbatim::TypeFlexible(flexible_item)) + } else if lookahead.peek(Token![use]) { + input.parse::()?; + let content; + braced!(content in input); + let trees = content.parse_terminated(RootUseTree::parse, Token![,])?; + input.parse::()?; + Ok(ItemVerbatim::UseBrace(UseBrace { attrs, vis, trees })) + } else { + Err(lookahead.error()) + } + } + } + + let item: ItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(item) => item, + Err(_) => unimplemented!("Item::Verbatim `{}`", tokens), + }; + + match item { + ItemVerbatim::Empty => { + self.hardbreak(); + } + ItemVerbatim::ConstFlexible(item) => { + self.flexible_item_const(&item); + } + ItemVerbatim::FnFlexible(item) => { + 
self.flexible_item_fn(&item); + } + ItemVerbatim::ImplFlexible(item) => { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.ibox(-INDENT); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + if item.unsafety { + self.word("unsafe "); + } + self.word("impl"); + self.generics(&item.generics); + self.end(); + self.nbsp(); + match item.constness { + ImplConstness::None => {} + ImplConstness::MaybeConst => self.word("?const "), + ImplConstness::Const => self.word("const "), + } + if item.negative_impl { + self.word("!"); + } + if let Some(trait_) = &item.trait_ { + self.ty(trait_); + self.space(); + self.word("for "); + } + self.ty(&item.self_ty); + self.end(); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for impl_item in &item.items { + self.impl_item(impl_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + ItemVerbatim::Macro2(item) => { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("macro "); + self.ident(&item.ident); + if let Some(args) = &item.args { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + self.ibox(0); + self.macro_rules_tokens(args.clone(), true); + self.end(); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + self.word(")"); + } + self.word(" {"); + if !item.body.is_empty() { + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak(); + self.ibox(0); + self.macro_rules_tokens(item.body.clone(), false); + self.end(); + self.hardbreak(); + self.offset(-INDENT); + self.end(); + } + self.word("}"); + self.hardbreak(); + } + ItemVerbatim::StaticFlexible(item) => { + self.flexible_item_static(&item); + } + ItemVerbatim::TypeFlexible(item) => { + self.flexible_item_type(&item); + } + ItemVerbatim::UseBrace(item) => { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("use "); + if 
item.trees.len() == 1 { + self.word("::"); + self.use_tree(&item.trees[0].inner); + } else { + self.cbox(INDENT); + self.word("{"); + self.zerobreak(); + self.ibox(0); + for use_tree in item.trees.iter().delimited() { + if use_tree.leading_colon.is_some() { + self.word("::"); + } + self.use_tree(&use_tree.inner); + if !use_tree.is_last { + self.word(","); + let mut use_tree = &use_tree.inner; + while let UseTree::Path(use_path) = use_tree { + use_tree = &use_path.tree; + } + if let UseTree::Group(_) = use_tree { + self.hardbreak(); + } else { + self.space(); + } + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); + self.word("}"); + self.end(); + } + self.word(";"); + self.hardbreak(); + } + } + } + + fn use_tree(&mut self, use_tree: &UseTree) { + match use_tree { + UseTree::Path(use_path) => self.use_path(use_path), + UseTree::Name(use_name) => self.use_name(use_name), + UseTree::Rename(use_rename) => self.use_rename(use_rename), + UseTree::Glob(use_glob) => self.use_glob(use_glob), + UseTree::Group(use_group) => self.use_group(use_group), + } + } + + fn use_path(&mut self, use_path: &UsePath) { + self.ident(&use_path.ident); + self.word("::"); + self.use_tree(&use_path.tree); + } + + fn use_name(&mut self, use_name: &UseName) { + self.ident(&use_name.ident); + } + + fn use_rename(&mut self, use_rename: &UseRename) { + self.ident(&use_rename.ident); + self.word(" as "); + self.ident(&use_rename.rename); + } + + fn use_glob(&mut self, use_glob: &UseGlob) { + let _ = use_glob; + self.word("*"); + } + + fn use_group(&mut self, use_group: &UseGroup) { + if use_group.items.is_empty() { + self.word("{}"); + } else if use_group.items.len() == 1 { + self.use_tree(&use_group.items[0]); + } else { + self.cbox(INDENT); + self.word("{"); + self.zerobreak(); + self.ibox(0); + for use_tree in use_group.items.iter().delimited() { + self.use_tree(&use_tree); + if !use_tree.is_last { + self.word(","); + let mut use_tree = *use_tree; + while let 
UseTree::Path(use_path) = use_tree { + use_tree = &use_path.tree; + } + if let UseTree::Group(_) = use_tree { + self.hardbreak(); + } else { + self.space(); + } + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); + self.word("}"); + self.end(); + } + } + + fn foreign_item(&mut self, foreign_item: &ForeignItem) { + match foreign_item { + ForeignItem::Fn(item) => self.foreign_item_fn(item), + ForeignItem::Static(item) => self.foreign_item_static(item), + ForeignItem::Type(item) => self.foreign_item_type(item), + ForeignItem::Macro(item) => self.foreign_item_macro(item), + ForeignItem::Verbatim(item) => self.foreign_item_verbatim(item), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown ForeignItem"), + } + } + + fn foreign_item_fn(&mut self, foreign_item: &ForeignItemFn) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(INDENT); + self.visibility(&foreign_item.vis); + self.signature(&foreign_item.sig); + self.where_clause_semi(&foreign_item.sig.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn foreign_item_static(&mut self, foreign_item: &ForeignItemStatic) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(0); + self.visibility(&foreign_item.vis); + self.word("static "); + self.static_mutability(&foreign_item.mutability); + self.ident(&foreign_item.ident); + self.word(": "); + self.ty(&foreign_item.ty); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn foreign_item_type(&mut self, foreign_item: &ForeignItemType) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(0); + self.visibility(&foreign_item.vis); + self.word("type "); + self.ident(&foreign_item.ident); + self.generics(&foreign_item.generics); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn foreign_item_macro(&mut self, foreign_item: &ForeignItemMacro) { + self.outer_attrs(&foreign_item.attrs); + self.mac(&foreign_item.mac, None); + 
self.mac_semi_if_needed(&foreign_item.mac.delimiter); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn foreign_item_verbatim(&mut self, foreign_item: &TokenStream) { + if !foreign_item.is_empty() { + unimplemented!("ForeignItem::Verbatim `{}`", foreign_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn foreign_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Attribute, Token, Visibility}; + use verbatim::{FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, WhereClauseLocation}; + + enum ForeignItemVerbatim { + Empty, + FnFlexible(FlexibleItemFn), + StaticFlexible(FlexibleItemStatic), + TypeFlexible(FlexibleItemType), + } + + impl Parse for ForeignItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ForeignItemVerbatim::Empty); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = false; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ForeignItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![static]) { + let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; + Ok(ForeignItemVerbatim::StaticFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::Both, + )?; + Ok(ForeignItemVerbatim::TypeFlexible(flexible_item)) + } else { + Err(lookahead.error()) + } + } + } + + let foreign_item: ForeignItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(foreign_item) => foreign_item, + Err(_) => unimplemented!("ForeignItem::Verbatim `{}`", tokens), + }; + + match 
foreign_item { + ForeignItemVerbatim::Empty => { + self.hardbreak(); + } + ForeignItemVerbatim::FnFlexible(foreign_item) => { + self.flexible_item_fn(&foreign_item); + } + ForeignItemVerbatim::StaticFlexible(foreign_item) => { + self.flexible_item_static(&foreign_item); + } + ForeignItemVerbatim::TypeFlexible(foreign_item) => { + self.flexible_item_type(&foreign_item); + } + } + } + + fn trait_item(&mut self, trait_item: &TraitItem) { + match trait_item { + TraitItem::Const(item) => self.trait_item_const(item), + TraitItem::Fn(item) => self.trait_item_fn(item), + TraitItem::Type(item) => self.trait_item_type(item), + TraitItem::Macro(item) => self.trait_item_macro(item), + TraitItem::Verbatim(item) => self.trait_item_verbatim(item), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown TraitItem"), + } + } + + fn trait_item_const(&mut self, trait_item: &TraitItemConst) { + self.outer_attrs(&trait_item.attrs); + self.cbox(0); + self.word("const "); + self.ident(&trait_item.ident); + self.generics(&trait_item.generics); + self.word(": "); + self.ty(&trait_item.ty); + if let Some((_eq_token, default)) = &trait_item.default { + self.word(" = "); + self.neverbreak(); + self.expr(default); + } + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn trait_item_fn(&mut self, trait_item: &TraitItemFn) { + self.outer_attrs(&trait_item.attrs); + self.cbox(INDENT); + self.signature(&trait_item.sig); + if let Some(block) = &trait_item.default { + self.where_clause_for_body(&trait_item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&trait_item.attrs); + for stmt in &block.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.where_clause_semi(&trait_item.sig.generics.where_clause); + self.end(); + } + self.hardbreak(); + } + + fn trait_item_type(&mut self, trait_item: &TraitItemType) { + 
self.outer_attrs(&trait_item.attrs); + self.cbox(INDENT); + self.word("type "); + self.ident(&trait_item.ident); + self.generics(&trait_item.generics); + for bound in trait_item.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + if let Some((_eq_token, default)) = &trait_item.default { + self.word(" = "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(default); + self.end(); + } + self.where_clause_oneline_semi(&trait_item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn trait_item_macro(&mut self, trait_item: &TraitItemMacro) { + self.outer_attrs(&trait_item.attrs); + self.mac(&trait_item.mac, None); + self.mac_semi_if_needed(&trait_item.mac.delimiter); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn trait_item_verbatim(&mut self, trait_item: &TokenStream) { + if !trait_item.is_empty() { + unimplemented!("TraitItem::Verbatim `{}`", trait_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn trait_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Attribute, Token, Visibility}; + use verbatim::{FlexibleItemType, WhereClauseLocation}; + + enum TraitItemVerbatim { + Empty, + TypeFlexible(FlexibleItemType), + PubOrDefault(PubOrDefaultTraitItem), + } + + struct PubOrDefaultTraitItem { + attrs: Vec, + vis: Visibility, + defaultness: bool, + trait_item: TraitItem, + } + + impl Parse for TraitItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(TraitItemVerbatim::Empty); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = input.parse::>()?.is_some(); + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::AfterEq, + 
)?; + Ok(TraitItemVerbatim::TypeFlexible(flexible_item)) + } else if (lookahead.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn])) + && (!matches!(vis, Visibility::Inherited) || defaultness) + { + Ok(TraitItemVerbatim::PubOrDefault(PubOrDefaultTraitItem { + attrs, + vis, + defaultness, + trait_item: input.parse()?, + })) + } else { + Err(lookahead.error()) + } + } + } + + let impl_item: TraitItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(impl_item) => impl_item, + Err(_) => unimplemented!("TraitItem::Verbatim `{}`", tokens), + }; + + match impl_item { + TraitItemVerbatim::Empty => { + self.hardbreak(); + } + TraitItemVerbatim::TypeFlexible(trait_item) => { + self.flexible_item_type(&trait_item); + } + TraitItemVerbatim::PubOrDefault(trait_item) => { + self.outer_attrs(&trait_item.attrs); + self.visibility(&trait_item.vis); + if trait_item.defaultness { + self.word("default "); + } + self.trait_item(&trait_item.trait_item); + } + } + } + + fn impl_item(&mut self, impl_item: &ImplItem) { + match impl_item { + ImplItem::Const(item) => self.impl_item_const(item), + ImplItem::Fn(item) => self.impl_item_fn(item), + ImplItem::Type(item) => self.impl_item_type(item), + ImplItem::Macro(item) => self.impl_item_macro(item), + ImplItem::Verbatim(item) => self.impl_item_verbatim(item), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown ImplItem"), + } + } + + fn impl_item_const(&mut self, impl_item: &ImplItemConst) { + self.outer_attrs(&impl_item.attrs); + self.cbox(0); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.word("const "); + self.ident(&impl_item.ident); + self.generics(&impl_item.generics); + self.word(": "); + self.ty(&impl_item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&impl_item.expr); + self.word(";"); + self.end(); 
+ self.hardbreak(); + } + + fn impl_item_fn(&mut self, impl_item: &ImplItemFn) { + self.outer_attrs(&impl_item.attrs); + self.cbox(INDENT); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.signature(&impl_item.sig); + self.where_clause_for_body(&impl_item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&impl_item.attrs); + for stmt in &impl_item.block.stmts { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn impl_item_type(&mut self, impl_item: &ImplItemType) { + self.outer_attrs(&impl_item.attrs); + self.cbox(INDENT); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.word("type "); + self.ident(&impl_item.ident); + self.generics(&impl_item.generics); + self.word(" = "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(&impl_item.ty); + self.end(); + self.where_clause_oneline_semi(&impl_item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn impl_item_macro(&mut self, impl_item: &ImplItemMacro) { + self.outer_attrs(&impl_item.attrs); + self.mac(&impl_item.mac, None); + self.mac_semi_if_needed(&impl_item.mac.delimiter); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn impl_item_verbatim(&mut self, impl_item: &TokenStream) { + if !impl_item.is_empty() { + unimplemented!("ImplItem::Verbatim `{}`", impl_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn impl_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Attribute, Ident, Token, Visibility}; + use verbatim::{FlexibleItemConst, FlexibleItemFn, FlexibleItemType, WhereClauseLocation}; + + enum ImplItemVerbatim { + Empty, + ConstFlexible(FlexibleItemConst), + FnFlexible(FlexibleItemFn), + TypeFlexible(FlexibleItemType), + } + + impl Parse for ImplItemVerbatim { + fn 
parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ImplItemVerbatim::Empty); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = input.parse::>()?.is_some(); + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { + let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; + Ok(ImplItemVerbatim::ConstFlexible(flexible_item)) + } else if input.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ImplItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::AfterEq, + )?; + Ok(ImplItemVerbatim::TypeFlexible(flexible_item)) + } else { + Err(lookahead.error()) + } + } + } + + let impl_item: ImplItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(impl_item) => impl_item, + Err(_) => unimplemented!("ImplItem::Verbatim `{}`", tokens), + }; + + match impl_item { + ImplItemVerbatim::Empty => { + self.hardbreak(); + } + ImplItemVerbatim::ConstFlexible(impl_item) => { + self.flexible_item_const(&impl_item); + } + ImplItemVerbatim::FnFlexible(impl_item) => { + self.flexible_item_fn(&impl_item); + } + ImplItemVerbatim::TypeFlexible(impl_item) => { + self.flexible_item_type(&impl_item); + } + } + } + + fn signature(&mut self, signature: &Signature) { + if signature.constness.is_some() { + self.word("const "); + } + if signature.asyncness.is_some() { + self.word("async "); + } + if signature.unsafety.is_some() { + self.word("unsafe "); + } + if let Some(abi) = &signature.abi { + self.abi(abi); + } + self.word("fn "); + self.ident(&signature.ident); + 
self.generics(&signature.generics); + self.word("("); + self.neverbreak(); + self.cbox(0); + self.zerobreak(); + for input in signature.inputs.iter().delimited() { + self.fn_arg(&input); + let is_last = input.is_last && signature.variadic.is_none(); + self.trailing_comma(is_last); + } + if let Some(variadic) = &signature.variadic { + self.variadic(variadic); + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + self.cbox(-INDENT); + self.return_type(&signature.output); + self.end(); + } + + fn fn_arg(&mut self, fn_arg: &FnArg) { + match fn_arg { + FnArg::Receiver(receiver) => self.receiver(receiver), + FnArg::Typed(pat_type) => self.pat_type(pat_type), + } + } + + fn receiver(&mut self, receiver: &Receiver) { + self.outer_attrs(&receiver.attrs); + if let Some((_ampersand, lifetime)) = &receiver.reference { + self.word("&"); + if let Some(lifetime) = lifetime { + self.lifetime(lifetime); + self.nbsp(); + } + } + if receiver.mutability.is_some() { + self.word("mut "); + } + self.word("self"); + if receiver.colon_token.is_some() { + self.word(": "); + self.ty(&receiver.ty); + } else { + let consistent = match (&receiver.reference, &receiver.mutability, &*receiver.ty) { + (Some(_), mutability, Type::Reference(ty)) => { + mutability.is_some() == ty.mutability.is_some() + && match &*ty.elem { + Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + } + } + (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + }; + if !consistent { + self.word(": "); + self.ty(&receiver.ty); + } + } + } + + fn variadic(&mut self, variadic: &Variadic) { + self.outer_attrs(&variadic.attrs); + if let Some((pat, _colon)) = &variadic.pat { + self.pat(pat); + self.word(": "); + } + self.word("..."); + } + + fn static_mutability(&mut self, mutability: &StaticMutability) { + match mutability { + StaticMutability::Mut(_) => self.word("mut "), + StaticMutability::None => {} + #[cfg_attr(all(test, 
exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown StaticMutability"), + } + } +} + +#[cfg(feature = "verbatim")] +mod verbatim { + use crate::algorithm::Printer; + use crate::iter::IterDelimited; + use crate::INDENT; + use syn::ext::IdentExt; + use syn::parse::{ParseStream, Result}; + use syn::{ + braced, token, Attribute, Block, Expr, Generics, Ident, Signature, StaticMutability, Stmt, + Token, Type, TypeParamBound, Visibility, WhereClause, + }; + + pub struct FlexibleItemConst { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub ident: Ident, + pub ty: Type, + } + + pub struct FlexibleItemFn { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub sig: Signature, + pub body: Option>, + } + + pub struct FlexibleItemStatic { + pub attrs: Vec, + pub vis: Visibility, + pub mutability: StaticMutability, + pub ident: Ident, + pub ty: Option, + pub expr: Option, + } + + pub struct FlexibleItemType { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub ident: Ident, + pub generics: Generics, + pub bounds: Vec, + pub definition: Option, + pub where_clause_after_eq: Option, + } + + pub enum WhereClauseLocation { + // type Ty where T: 'static = T; + BeforeEq, + // type Ty = T where T: 'static; + AfterEq, + // TODO: goes away once the migration period on rust-lang/rust#89122 is over + Both, + } + + impl FlexibleItemConst { + pub fn parse( + attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + ) -> Result { + input.parse::()?; + let ident = input.call(Ident::parse_any)?; + input.parse::()?; + let ty: Type = input.parse()?; + input.parse::()?; + + Ok(FlexibleItemConst { + attrs, + vis, + defaultness, + ident, + ty, + }) + } + } + + impl FlexibleItemFn { + pub fn parse( + mut attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + ) -> Result { + let sig: Signature = input.parse()?; + + let lookahead = input.lookahead1(); + let body = if 
lookahead.peek(Token![;]) { + input.parse::()?; + None + } else if lookahead.peek(token::Brace) { + let content; + braced!(content in input); + attrs.extend(content.call(Attribute::parse_inner)?); + Some(content.call(Block::parse_within)?) + } else { + return Err(lookahead.error()); + }; + + Ok(FlexibleItemFn { + attrs, + vis, + defaultness, + sig, + body, + }) + } + } + + impl FlexibleItemStatic { + pub fn parse(attrs: Vec, vis: Visibility, input: ParseStream) -> Result { + input.parse::()?; + let mutability: StaticMutability = input.parse()?; + let ident = input.parse()?; + + let lookahead = input.lookahead1(); + let has_type = lookahead.peek(Token![:]); + let has_expr = lookahead.peek(Token![=]); + if !has_type && !has_expr { + return Err(lookahead.error()); + } + + let ty: Option = if has_type { + input.parse::()?; + input.parse().map(Some)? + } else { + None + }; + + let expr: Option = if input.parse::>()?.is_some() { + input.parse().map(Some)? + } else { + None + }; + + input.parse::()?; + + Ok(FlexibleItemStatic { + attrs, + vis, + mutability, + ident, + ty, + expr, + }) + } + } + + impl FlexibleItemType { + pub fn parse( + attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + where_clause_location: WhereClauseLocation, + ) -> Result { + input.parse::()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + + let mut bounds = Vec::new(); + if input.parse::>()?.is_some() { + loop { + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + bounds.push(input.parse::()?); + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + input.parse::()?; + } + } + + match where_clause_location { + WhereClauseLocation::BeforeEq | WhereClauseLocation::Both => { + generics.where_clause = input.parse()?; + } + WhereClauseLocation::AfterEq => {} + } + + let definition = if input.parse::>()?.is_some() { + Some(input.parse()?) 
+ } else { + None + }; + + let where_clause_after_eq = match where_clause_location { + WhereClauseLocation::AfterEq | WhereClauseLocation::Both + if generics.where_clause.is_none() => + { + input.parse()? + } + _ => None, + }; + + input.parse::()?; + + Ok(FlexibleItemType { + attrs, + vis, + defaultness, + ident, + generics, + bounds, + definition, + where_clause_after_eq, + }) + } + } + + impl Printer { + pub fn flexible_item_const(&mut self, item: &FlexibleItemConst) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + self.word("const "); + self.ident(&item.ident); + self.word(": "); + self.ty(&item.ty); + self.word(";"); + self.end(); + self.hardbreak(); + } + + pub fn flexible_item_fn(&mut self, item: &FlexibleItemFn) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + self.signature(&item.sig); + if let Some(body) = &item.body { + self.where_clause_for_body(&item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for stmt in body { + self.stmt(stmt); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.where_clause_semi(&item.sig.generics.where_clause); + self.end(); + } + self.hardbreak(); + } + + pub fn flexible_item_static(&mut self, item: &FlexibleItemStatic) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("static "); + self.static_mutability(&item.mutability); + self.ident(&item.ident); + if let Some(ty) = &item.ty { + self.word(": "); + self.ty(ty); + } + if let Some(expr) = &item.expr { + self.word(" = "); + self.neverbreak(); + self.expr(expr); + } + self.word(";"); + self.end(); + self.hardbreak(); + } + + pub fn flexible_item_type(&mut self, item: &FlexibleItemType) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); 
+ if item.defaultness { + self.word("default "); + } + self.word("type "); + self.ident(&item.ident); + self.generics(&item.generics); + for bound in item.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + if let Some(definition) = &item.definition { + self.where_clause_oneline(&item.generics.where_clause); + self.word("= "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(definition); + self.end(); + self.where_clause_oneline_semi(&item.where_clause_after_eq); + } else { + self.where_clause_oneline_semi(&item.generics.where_clause); + } + self.end(); + self.hardbreak(); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/iter.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/iter.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/iter.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/iter.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,46 @@ +use std::iter::Peekable; +use std::ops::Deref; + +pub struct Delimited { + is_first: bool, + iter: Peekable, +} + +pub trait IterDelimited: Iterator + Sized { + fn delimited(self) -> Delimited { + Delimited { + is_first: true, + iter: self.peekable(), + } + } +} + +impl IterDelimited for I {} + +pub struct IteratorItem { + value: T, + pub is_first: bool, + pub is_last: bool, +} + +impl Iterator for Delimited { + type Item = IteratorItem; + + fn next(&mut self) -> Option { + let item = IteratorItem { + value: self.iter.next()?, + is_first: self.is_first, + is_last: self.iter.peek().is_none(), + }; + self.is_first = false; + Some(item) + } +} + +impl Deref for IteratorItem { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.value + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lib.rs 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lib.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,379 @@ +//! [![github]](https://github.com/dtolnay/prettyplease) [![crates-io]](https://crates.io/crates/prettyplease) [![docs-rs]](https://docs.rs/prettyplease) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! **prettyplease::unparse** — a minimal `syn` syntax tree pretty-printer +//! +//!
+//! +//! # Overview +//! +//! This is a pretty-printer to turn a `syn` syntax tree into a `String` of +//! well-formatted source code. In contrast to rustfmt, this library is intended +//! to be suitable for arbitrary generated code. +//! +//! Rustfmt prioritizes high-quality output that is impeccable enough that you'd +//! be comfortable spending your career staring at its output — but that +//! means some heavyweight algorithms, and it has a tendency to bail out on code +//! that is hard to format (for example [rustfmt#3697], and there are dozens +//! more issues like it). That's not necessarily a big deal for human-generated +//! code because when code gets highly nested, the human will naturally be +//! inclined to refactor into more easily formattable code. But for generated +//! code, having the formatter just give up leaves it totally unreadable. +//! +//! [rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697 +//! +//! This library is designed using the simplest possible algorithm and data +//! structures that can deliver about 95% of the quality of rustfmt-formatted +//! output. In my experience testing real-world code, approximately 97-98% of +//! output lines come out identical between rustfmt's formatting and this +//! crate's. The rest have slightly different linebreak decisions, but still +//! clearly follow the dominant modern Rust style. +//! +//! The tradeoffs made by this crate are a good fit for generated code that you +//! will *not* spend your career staring at. For example, the output of +//! `bindgen`, or the output of `cargo-expand`. In those cases it's more +//! important that the whole thing be formattable without the formatter giving +//! up, than that it be flawless. +//! +//!
+//! +//! # Feature matrix +//! +//! Here are a few superficial comparisons of this crate against the AST +//! pretty-printer built into rustc, and rustfmt. The sections below go into +//! more detail comparing the output of each of these libraries. +//! +//! | | prettyplease | rustc | rustfmt | +//! |:---|:---:|:---:|:---:| +//! | non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | +//! | idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | +//! | throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | +//! | number of dependencies | 3 | 72 | 66 | +//! | compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | +//! | buildable using a stable Rust compiler | 💚 | ❌ | ❌ | +//! | published to crates.io | 💚 | ❌ | ❌ | +//! | extensively configurable output | ❌ | ❌ | 💚 | +//! | intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | +//! +//!
+//! +//! # Comparison to rustfmt +//! +//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +//! - [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) +//! +//! If you weren't told which output file is which, it would be practically +//! impossible to tell — **except** for line 435 in the rustfmt output, +//! which is more than 1000 characters long because rustfmt just gave up +//! formatting that part of the file: +//! +//! ``` +//! # const _: &str = stringify! {{{ +//! match segments[5] { +//! 0 => write!(f, "::{}", ipv4), +//! 0xffff => write!(f, "::ffff:{}", ipv4), +//! _ => unreachable!(), +//! } +//! } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } +//! } else { +//! const IPV6_BUF_LEN: usize = (4 * 8) + 7; +//! let mut buf = [0u8; IPV6_BUF_LEN]; +//! let mut buf_slice = &mut buf[..]; +//! # }}; +//! ``` +//! +//! 
This is a pretty typical manifestation of rustfmt bailing out in generated +//! code — a chunk of the input ends up on one line. The other +//! manifestation is that you're working on some code, running rustfmt on save +//! like a conscientious developer, but after a while notice it isn't doing +//! anything. You introduce an intentional formatting issue, like a stray indent +//! or semicolon, and run rustfmt to check your suspicion. Nope, it doesn't get +//! cleaned up — rustfmt is just not formatting the part of the file you +//! are working on. +//! +//! The prettyplease library is designed to have no pathological cases that +//! force a bail out; the entire input you give it will get formatted in some +//! "good enough" form. +//! +//! Separately, rustfmt can be problematic to integrate into projects. It's +//! written using rustc's internal syntax tree, so it can't be built by a stable +//! compiler. Its releases are not regularly published to crates.io, so in Cargo +//! builds you'd need to depend on it as a git dependency, which precludes +//! publishing your crate to crates.io also. You can shell out to a `rustfmt` +//! binary, but that'll be whatever rustfmt version is installed on each +//! developer's system (if any), which can lead to spurious diffs in checked-in +//! generated code formatted by different versions. In contrast prettyplease is +//! designed to be easy to pull in as a library, and compiles fast. +//! +//!
+//! +//! # Comparison to rustc_ast_pretty +//! +//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +//! - [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) +//! +//! This is the pretty-printer that gets used when rustc prints source code, +//! such as `rustc -Zunpretty=expanded`. It's used also by the standard +//! library's `stringify!` when stringifying an interpolated macro_rules AST +//! fragment, like an $:expr, and transitively by `dbg!` and many macros in the +//! ecosystem. +//! +//! Rustc's formatting is mostly okay, but does not hew closely to the dominant +//! contemporary style of Rust formatting. Some things wouldn't ever be written +//! on one line, like this `match` expression, and certainly not with a comma in +//! front of the closing brace: +//! +//! ``` +//! # const _: &str = stringify! { +//! fn eq(&self, other: &IpAddr) -> bool { +//! match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } +//! } +//! # }; +//! ``` +//! +//! Some places use non-multiple-of-4 indentation, which is definitely not the +//! norm: +//! +//! ``` +//! # const _: &str = stringify! { +//! pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { +//! let [a, b, c, d] = self.octets(); +//! Ipv6Addr{inner: +//! c::in6_addr{s6_addr: +//! [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, +//! 0xFF, a, b, c, d],},} +//! } +//! # }; +//! ``` +//! +//! And although there isn't an egregious example of it in the link because the +//! input code is pretty tame, in general rustc_ast_pretty has pathological +//! behavior on generated code. It has a tendency to use excessive horizontal +//! indentation and rapidly run out of width: +//! +//! ``` +//! # const _: &str = stringify! { +//! ::std::io::_print(::core::fmt::Arguments::new_v1(&[""], +//! &match (&msg,) { +//! _args => +//! 
[::core::fmt::ArgumentV1::new(_args.0, +//! ::core::fmt::Display::fmt)], +//! })); +//! # }; +//! ``` +//! +//! The snippets above are clearly different from modern rustfmt style. In +//! contrast, prettyplease is designed to have output that is practically +//! indistinguishable from rustfmt-formatted code. +//! +//!
+//! +//! # Example +//! +//! ``` +//! // [dependencies] +//! // prettyplease = "0.2" +//! // syn = { version = "2", default-features = false, features = ["full", "parsing"] } +//! +//! const INPUT: &str = stringify! { +//! use crate::{ +//! lazy::{Lazy, SyncLazy, SyncOnceCell}, panic, +//! sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, +//! mpsc::channel, Mutex, }, +//! thread, +//! }; +//! impl Into for T where U: From { +//! fn into(self) -> U { U::from(self) } +//! } +//! }; +//! +//! fn main() { +//! let syntax_tree = syn::parse_file(INPUT).unwrap(); +//! let formatted = prettyplease::unparse(&syntax_tree); +//! print!("{}", formatted); +//! } +//! ``` +//! +//!
+//! +//! # Algorithm notes +//! +//! The approach and terminology used in the implementation are derived from +//! [*Derek C. Oppen, "Pretty Printing" (1979)*][paper], on which +//! rustc_ast_pretty is also based, and from rustc_ast_pretty's implementation +//! written by Graydon Hoare in 2011 (and modernized over the years by dozens of +//! volunteer maintainers). +//! +//! [paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf +//! +//! The paper describes two language-agnostic interacting procedures `Scan()` +//! and `Print()`. Language-specific code decomposes an input data structure +//! into a stream of `string` and `break` tokens, and `begin` and `end` tokens +//! for grouping. Each `begin`–`end` range may be identified as either +//! "consistent breaking" or "inconsistent breaking". If a group is consistently +//! breaking, then if the whole contents do not fit on the line, *every* `break` +//! token in the group will receive a linebreak. This is appropriate, for +//! example, for Rust struct literals, or arguments of a function call. If a +//! group is inconsistently breaking, then the `string` tokens in the group are +//! greedily placed on the line until out of space, and linebroken only at those +//! `break` tokens for which the next string would not fit. For example, this is +//! appropriate for the contents of a braced `use` statement in Rust. +//! +//! Scan's job is to efficiently accumulate sizing information about groups and +//! breaks. For every `begin` token we compute the distance to the matched `end` +//! token, and for every `break` we compute the distance to the next `break`. +//! The algorithm uses a ringbuffer to hold tokens whose size is not yet +//! ascertained. The maximum size of the ringbuffer is bounded by the target +//! line length and does not grow indefinitely, regardless of deep nesting in +//! the input stream. That's because once a group is sufficiently big, the +//! 
precise size can no longer make a difference to linebreak decisions and we +//! can effectively treat it as "infinity". +//! +//! Print's job is to use the sizing information to efficiently assign a +//! "broken" or "not broken" status to every `begin` token. At that point the +//! output is easily constructed by concatenating `string` tokens and breaking +//! at `break` tokens contained within a broken group. +//! +//! Leveraging these primitives (i.e. cleverly placing the all-or-nothing +//! consistent breaks and greedy inconsistent breaks) to yield +//! rustfmt-compatible formatting for all of Rust's syntax tree nodes is a fun +//! challenge. +//! +//! Here is a visualization of some Rust tokens fed into the pretty printing +//! algorithm. Consistently breaking `begin`—`end` pairs are represented +//! by `«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, +//! and the rest of the non-whitespace are `string`. +//! +//! ```text +//! use crate::«{· +//! ‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· +//! panic,· +//! sync::«{· +//! ‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· +//! mpsc::channel,· Mutex›,· +//! }»,· +//! thread›,· +//! }»;· +//! «‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· +//! where· +//! U:‹ From<«·T·»>›,· +//! {· +//! « fn into(·«·self·») -> U {· +//! ‹ U::from(«·self·»)›· +//! » }· +//! »}· +//! ``` +//! +//! The algorithm described in the paper is not quite sufficient for producing +//! well-formatted Rust code that is locally indistinguishable from rustfmt's +//! style. The reason is that in the paper, the complete non-whitespace contents +//! are assumed to be independent of linebreak decisions, with Scan and Print +//! being only in control of the whitespace (spaces and line breaks). In Rust as +//! idiomatically formattted by rustfmt, that is not the case. Trailing commas +//! are one example; the punctuation is only known *after* the broken vs +//! non-broken status of the surrounding group is known: +//! +//! ``` +//! 
# struct Struct { x: u64, y: bool } +//! # let xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 0; +//! # let yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy = true; +//! # +//! let _ = Struct { x: 0, y: true }; +//! +//! let _ = Struct { +//! x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, +//! y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped +//! }; +//! ``` +//! +//! The formatting of `match` expressions is another case; we want small arms on +//! the same line as the pattern, and big arms wrapped in a brace. The presence +//! of the brace punctuation, comma, and semicolon are all dependent on whether +//! the arm fits on the line: +//! +//! ``` +//! # struct Entry { nanos: u32 } +//! # let total_nanos = 0u64; +//! # let mut total_secs = 0u64; +//! # let tmp; +//! # let entry = Entry { nanos: 0 }; +//! # const NANOS_PER_SEC: u32 = 1_000_000_000; +//! # +//! match total_nanos.checked_add(entry.nanos as u64) { +//! Some(n) => tmp = n, //<- small arm, inline with comma +//! None => { +//! total_secs = total_secs +//! .checked_add(total_nanos / NANOS_PER_SEC as u64) +//! .expect("overflow in iter::sum over durations"); +//! } //<- big arm, needs brace added, and also semicolon^ +//! } +//! ``` +//! +//! The printing algorithm implementation in this crate accommodates all of +//! these situations with conditional punctuation tokens whose selection can be +//! deferred and populated after it's known that the group is or is not broken. 
+ +#![doc(html_root_url = "https://docs.rs/prettyplease/0.2.4")] +#![allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::derive_partial_eq_without_eq, + clippy::doc_markdown, + clippy::enum_glob_use, + clippy::items_after_statements, + clippy::let_underscore_untyped, + clippy::match_like_matches_macro, + clippy::match_same_arms, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::needless_pass_by_value, + clippy::similar_names, + clippy::too_many_lines, + clippy::unused_self, + clippy::vec_init_then_push +)] +#![cfg_attr(all(test, exhaustive), feature(non_exhaustive_omitted_patterns_lint))] + +mod algorithm; +mod attr; +mod convenience; +mod data; +mod expr; +mod file; +mod generics; +mod item; +mod iter; +mod lifetime; +mod lit; +mod mac; +mod pat; +mod path; +mod ring; +mod stmt; +mod token; +mod ty; + +use crate::algorithm::Printer; +use syn::File; + +// Target line width. +const MARGIN: isize = 89; + +// Number of spaces increment at each level of block indentation. +const INDENT: isize = 4; + +// Every line is allowed at least this much space, even if highly indented. 
+const MIN_SPACE: isize = 60; + +pub fn unparse(file: &File) -> String { + let mut p = Printer::new(); + p.file(file); + p.eof() +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lifetime.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lifetime.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lifetime.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lifetime.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,9 @@ +use crate::algorithm::Printer; +use syn::Lifetime; + +impl Printer { + pub fn lifetime(&mut self, lifetime: &Lifetime) { + self.word("'"); + self.ident(&lifetime.ident); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lit.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lit.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lit.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/lit.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,52 @@ +use crate::algorithm::Printer; +use proc_macro2::Literal; +use syn::{Lit, LitBool, LitByte, LitByteStr, LitChar, LitFloat, LitInt, LitStr}; + +impl Printer { + pub fn lit(&mut self, lit: &Lit) { + match lit { + Lit::Str(lit) => self.lit_str(lit), + Lit::ByteStr(lit) => self.lit_byte_str(lit), + Lit::Byte(lit) => self.lit_byte(lit), + Lit::Char(lit) => self.lit_char(lit), + Lit::Int(lit) => self.lit_int(lit), + Lit::Float(lit) => self.lit_float(lit), + Lit::Bool(lit) => self.lit_bool(lit), + Lit::Verbatim(lit) => self.lit_verbatim(lit), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown Lit"), + } + } + + pub fn lit_str(&mut self, lit: &LitStr) { + self.word(lit.token().to_string()); + } + + fn lit_byte_str(&mut self, lit: &LitByteStr) { + self.word(lit.token().to_string()); + } + + fn lit_byte(&mut self, lit: &LitByte) 
{ + self.word(lit.token().to_string()); + } + + fn lit_char(&mut self, lit: &LitChar) { + self.word(lit.token().to_string()); + } + + fn lit_int(&mut self, lit: &LitInt) { + self.word(lit.token().to_string()); + } + + fn lit_float(&mut self, lit: &LitFloat) { + self.word(lit.token().to_string()); + } + + fn lit_bool(&mut self, lit: &LitBool) { + self.word(if lit.value { "true" } else { "false" }); + } + + fn lit_verbatim(&mut self, token: &Literal) { + self.word(token.to_string()); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/mac.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/mac.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/mac.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/mac.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,219 @@ +use crate::algorithm::Printer; +use crate::path::PathKind; +use crate::token::Token; +use crate::INDENT; +use proc_macro2::{Delimiter, Spacing, TokenStream}; +use syn::{Ident, Macro, MacroDelimiter}; + +impl Printer { + pub fn mac(&mut self, mac: &Macro, ident: Option<&Ident>) { + if mac.path.is_ident("macro_rules") { + if let Some(ident) = ident { + self.macro_rules(ident, &mac.tokens); + return; + } + } + self.path(&mac.path, PathKind::Simple); + self.word("!"); + if let Some(ident) = ident { + self.nbsp(); + self.ident(ident); + } + let (open, close, delimiter_break) = match mac.delimiter { + MacroDelimiter::Paren(_) => ("(", ")", Self::zerobreak as fn(&mut Self)), + MacroDelimiter::Brace(_) => (" {", "}", Self::hardbreak as fn(&mut Self)), + MacroDelimiter::Bracket(_) => ("[", "]", Self::zerobreak as fn(&mut Self)), + }; + self.word(open); + if !mac.tokens.is_empty() { + self.cbox(INDENT); + delimiter_break(self); + self.ibox(0); + self.macro_rules_tokens(mac.tokens.clone(), false); + self.end(); + delimiter_break(self); + self.offset(-INDENT); + self.end(); + } + self.word(close); + } + + 
pub fn mac_semi_if_needed(&mut self, delimiter: &MacroDelimiter) { + match delimiter { + MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => self.word(";"), + MacroDelimiter::Brace(_) => {} + } + } + + fn macro_rules(&mut self, name: &Ident, rules: &TokenStream) { + enum State { + Start, + Matcher, + Equal, + Greater, + Expander, + } + + use State::*; + + self.word("macro_rules! "); + self.ident(name); + self.word(" {"); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + let mut state = State::Start; + for tt in rules.clone() { + let token = Token::from(tt); + match (state, token) { + (Start, Token::Group(delimiter, stream)) => { + self.delimiter_open(delimiter); + if !stream.is_empty() { + self.cbox(INDENT); + self.zerobreak(); + self.ibox(0); + self.macro_rules_tokens(stream, true); + self.end(); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + } + self.delimiter_close(delimiter); + state = Matcher; + } + (Matcher, Token::Punct('=', Spacing::Joint)) => { + self.word(" ="); + state = Equal; + } + (Equal, Token::Punct('>', Spacing::Alone)) => { + self.word(">"); + state = Greater; + } + (Greater, Token::Group(_delimiter, stream)) => { + self.word(" {"); + self.neverbreak(); + if !stream.is_empty() { + self.cbox(INDENT); + self.hardbreak(); + self.ibox(0); + self.macro_rules_tokens(stream, false); + self.end(); + self.hardbreak(); + self.offset(-INDENT); + self.end(); + } + self.word("}"); + state = Expander; + } + (Expander, Token::Punct(';', Spacing::Alone)) => { + self.word(";"); + self.hardbreak(); + state = Start; + } + _ => unimplemented!("bad macro_rules syntax"), + } + } + match state { + Start => {} + Expander => { + self.word(";"); + self.hardbreak(); + } + _ => self.hardbreak(), + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + pub fn macro_rules_tokens(&mut self, stream: TokenStream, matcher: bool) { + #[derive(PartialEq)] + enum State { + Start, + Dollar, + DollarIdent, + DollarIdentColon, + DollarParen, + 
DollarParenSep, + Pound, + PoundBang, + Dot, + Colon, + Colon2, + Ident, + IdentBang, + Delim, + Other, + } + + use State::*; + + let mut state = Start; + let mut previous_is_joint = true; + for tt in stream { + let token = Token::from(tt); + let (needs_space, next_state) = match (&state, &token) { + (Dollar, Token::Ident(_)) => (false, if matcher { DollarIdent } else { Other }), + (DollarIdent, Token::Punct(':', Spacing::Alone)) => (false, DollarIdentColon), + (DollarIdentColon, Token::Ident(_)) => (false, Other), + (DollarParen, Token::Punct('+' | '*' | '?', Spacing::Alone)) => (false, Other), + (DollarParen, Token::Ident(_) | Token::Literal(_)) => (false, DollarParenSep), + (DollarParen, Token::Punct(_, Spacing::Joint)) => (false, DollarParen), + (DollarParen, Token::Punct(_, Spacing::Alone)) => (false, DollarParenSep), + (DollarParenSep, Token::Punct('+' | '*', _)) => (false, Other), + (Pound, Token::Punct('!', _)) => (false, PoundBang), + (Dollar, Token::Group(Delimiter::Parenthesis, _)) => (false, DollarParen), + (Pound | PoundBang, Token::Group(Delimiter::Bracket, _)) => (false, Other), + (Ident, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { + (false, Delim) + } + (Ident, Token::Punct('!', Spacing::Alone)) => (false, IdentBang), + (IdentBang, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { + (false, Other) + } + (Colon, Token::Punct(':', _)) => (false, Colon2), + (_, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => (true, Delim), + (_, Token::Group(Delimiter::Brace | Delimiter::None, _)) => (true, Other), + (_, Token::Ident(ident)) if !is_keyword(ident) => { + (state != Dot && state != Colon2, Ident) + } + (_, Token::Literal(_)) => (state != Dot, Ident), + (_, Token::Punct(',' | ';', _)) => (false, Other), + (_, Token::Punct('.', _)) if !matcher => (state != Ident && state != Delim, Dot), + (_, Token::Punct(':', Spacing::Joint)) => (state != Ident, Colon), + (_, Token::Punct('$', _)) => (true, Dollar), 
+ (_, Token::Punct('#', _)) => (true, Pound), + (_, _) => (true, Other), + }; + if !previous_is_joint { + if needs_space { + self.space(); + } else if let Token::Punct('.', _) = token { + self.zerobreak(); + } + } + previous_is_joint = match token { + Token::Punct(_, Spacing::Joint) | Token::Punct('$', _) => true, + _ => false, + }; + self.single_token( + token, + if matcher { + |printer, stream| printer.macro_rules_tokens(stream, true) + } else { + |printer, stream| printer.macro_rules_tokens(stream, false) + }, + ); + state = next_state; + } + } +} + +fn is_keyword(ident: &Ident) -> bool { + match ident.to_string().as_str() { + "as" | "async" | "await" | "box" | "break" | "const" | "continue" | "crate" | "dyn" + | "else" | "enum" | "extern" | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" + | "macro" | "match" | "mod" | "move" | "mut" | "pub" | "ref" | "return" | "static" + | "struct" | "trait" | "type" | "unsafe" | "use" | "where" | "while" | "yield" => true, + _ => false, + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/path.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/path.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/path.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/path.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,207 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::INDENT; +use std::ptr; +use syn::{ + AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, Expr, GenericArgument, + ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, +}; + +#[derive(Copy, Clone, PartialEq)] +pub enum PathKind { + // a::B + Simple, + // a::B + Type, + // a::B:: + Expr, +} + +impl Printer { + pub fn path(&mut self, path: &Path, kind: PathKind) { + assert!(!path.segments.is_empty()); + for segment in path.segments.iter().delimited() { + if !segment.is_first || 
path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, kind); + } + } + + pub fn path_segment(&mut self, segment: &PathSegment, kind: PathKind) { + self.ident(&segment.ident); + self.path_arguments(&segment.arguments, kind); + } + + fn path_arguments(&mut self, arguments: &PathArguments, kind: PathKind) { + match arguments { + PathArguments::None => {} + PathArguments::AngleBracketed(arguments) => { + self.angle_bracketed_generic_arguments(arguments, kind); + } + PathArguments::Parenthesized(arguments) => { + self.parenthesized_generic_arguments(arguments); + } + } + } + + fn generic_argument(&mut self, arg: &GenericArgument) { + match arg { + GenericArgument::Lifetime(lifetime) => self.lifetime(lifetime), + GenericArgument::Type(ty) => self.ty(ty), + GenericArgument::Const(expr) => { + match expr { + Expr::Lit(expr) => self.expr_lit(expr), + Expr::Block(expr) => self.expr_block(expr), + // ERROR CORRECTION: Add braces to make sure that the + // generated code is valid. + _ => { + self.word("{"); + self.expr(expr); + self.word("}"); + } + } + } + GenericArgument::AssocType(assoc) => self.assoc_type(assoc), + GenericArgument::AssocConst(assoc) => self.assoc_const(assoc), + GenericArgument::Constraint(constraint) => self.constraint(constraint), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown GenericArgument"), + } + } + + pub fn angle_bracketed_generic_arguments( + &mut self, + generic: &AngleBracketedGenericArguments, + path_kind: PathKind, + ) { + if generic.args.is_empty() || path_kind == PathKind::Simple { + return; + } + + if path_kind == PathKind::Expr { + self.word("::"); + } + self.word("<"); + self.cbox(INDENT); + self.zerobreak(); + + // Print lifetimes before types/consts/bindings, regardless of their + // order in self.args. 
+ #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + First, + Second, + } + fn group(arg: &GenericArgument) -> Group { + match arg { + GenericArgument::Lifetime(_) => Group::First, + GenericArgument::Type(_) + | GenericArgument::Const(_) + | GenericArgument::AssocType(_) + | GenericArgument::AssocConst(_) + | GenericArgument::Constraint(_) => Group::Second, + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => Group::Second, + } + } + let last = generic.args.iter().max_by_key(|param| group(param)); + for current_group in [Group::First, Group::Second] { + for arg in &generic.args { + if group(arg) == current_group { + self.generic_argument(arg); + self.trailing_comma(ptr::eq(arg, last.unwrap())); + } + } + } + + self.offset(-INDENT); + self.end(); + self.word(">"); + } + + fn assoc_type(&mut self, assoc: &AssocType) { + self.ident(&assoc.ident); + if let Some(generics) = &assoc.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.word(" = "); + self.ty(&assoc.ty); + } + + fn assoc_const(&mut self, assoc: &AssocConst) { + self.ident(&assoc.ident); + if let Some(generics) = &assoc.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.word(" = "); + self.expr(&assoc.value); + } + + fn constraint(&mut self, constraint: &Constraint) { + self.ident(&constraint.ident); + if let Some(generics) = &constraint.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.ibox(INDENT); + for bound in constraint.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + self.end(); + } + + fn parenthesized_generic_arguments(&mut self, arguments: &ParenthesizedGenericArguments) { + self.cbox(INDENT); + self.word("("); + self.zerobreak(); + for ty in arguments.inputs.iter().delimited() { + self.ty(&ty); + self.trailing_comma(ty.is_last); + } + 
self.offset(-INDENT); + self.word(")"); + self.return_type(&arguments.output); + self.end(); + } + + pub fn qpath(&mut self, qself: &Option, path: &Path, kind: PathKind) { + let qself = match qself { + Some(qself) => qself, + None => { + self.path(path, kind); + return; + } + }; + + assert!(qself.position < path.segments.len()); + + self.word("<"); + self.ty(&qself.ty); + + let mut segments = path.segments.iter(); + if qself.position > 0 { + self.word(" as "); + for segment in segments.by_ref().take(qself.position).delimited() { + if !segment.is_first || path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, PathKind::Type); + if segment.is_last { + self.word(">"); + } + } + } else { + self.word(">"); + } + for segment in segments { + self.word("::"); + self.path_segment(segment, kind); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/pat.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/pat.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/pat.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/pat.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,246 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::{ + FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, PatTuple, + PatTupleStruct, PatType, PatWild, +}; + +impl Printer { + pub fn pat(&mut self, pat: &Pat) { + match pat { + Pat::Const(pat) => self.expr_const(pat), + Pat::Ident(pat) => self.pat_ident(pat), + Pat::Lit(pat) => self.expr_lit(pat), + Pat::Macro(pat) => self.expr_macro(pat), + Pat::Or(pat) => self.pat_or(pat), + Pat::Paren(pat) => self.pat_paren(pat), + Pat::Path(pat) => self.expr_path(pat), + Pat::Range(pat) => self.expr_range(pat), + Pat::Reference(pat) => self.pat_reference(pat), + Pat::Rest(pat) => 
self.pat_rest(pat), + Pat::Slice(pat) => self.pat_slice(pat), + Pat::Struct(pat) => self.pat_struct(pat), + Pat::Tuple(pat) => self.pat_tuple(pat), + Pat::TupleStruct(pat) => self.pat_tuple_struct(pat), + Pat::Type(pat) => self.pat_type(pat), + Pat::Verbatim(pat) => self.pat_verbatim(pat), + Pat::Wild(pat) => self.pat_wild(pat), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown Pat"), + } + } + + fn pat_ident(&mut self, pat: &PatIdent) { + self.outer_attrs(&pat.attrs); + if pat.by_ref.is_some() { + self.word("ref "); + } + if pat.mutability.is_some() { + self.word("mut "); + } + self.ident(&pat.ident); + if let Some((_at_token, subpat)) = &pat.subpat { + self.word(" @ "); + self.pat(subpat); + } + } + + fn pat_or(&mut self, pat: &PatOr) { + self.outer_attrs(&pat.attrs); + let mut consistent_break = false; + for case in &pat.cases { + match case { + Pat::Lit(_) | Pat::Wild(_) => {} + _ => { + consistent_break = true; + break; + } + } + } + if consistent_break { + self.cbox(0); + } else { + self.ibox(0); + } + for case in pat.cases.iter().delimited() { + if !case.is_first { + self.space(); + self.word("| "); + } + self.pat(&case); + } + self.end(); + } + + fn pat_paren(&mut self, pat: &PatParen) { + self.outer_attrs(&pat.attrs); + self.word("("); + self.pat(&pat.pat); + self.word(")"); + } + + fn pat_reference(&mut self, pat: &PatReference) { + self.outer_attrs(&pat.attrs); + self.word("&"); + if pat.mutability.is_some() { + self.word("mut "); + } + self.pat(&pat.pat); + } + + fn pat_rest(&mut self, pat: &PatRest) { + self.outer_attrs(&pat.attrs); + self.word(".."); + } + + fn pat_slice(&mut self, pat: &PatSlice) { + self.outer_attrs(&pat.attrs); + self.word("["); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + self.trailing_comma(elem.is_last); + } + self.word("]"); + } + + fn pat_struct(&mut self, pat: &PatStruct) { + self.outer_attrs(&pat.attrs); + self.cbox(INDENT); + self.path(&pat.path, 
PathKind::Expr); + self.word(" {"); + self.space_if_nonempty(); + for field in pat.fields.iter().delimited() { + self.field_pat(&field); + self.trailing_comma_or_space(field.is_last && pat.rest.is_none()); + } + if let Some(rest) = &pat.rest { + self.pat_rest(rest); + self.space(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + fn pat_tuple(&mut self, pat: &PatTuple) { + self.outer_attrs(&pat.attrs); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + if pat.elems.len() == 1 { + if pat.elems.trailing_punct() { + self.word(","); + } + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + fn pat_tuple_struct(&mut self, pat: &PatTupleStruct) { + self.outer_attrs(&pat.attrs); + self.path(&pat.path, PathKind::Expr); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + self.trailing_comma(elem.is_last); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + pub fn pat_type(&mut self, pat: &PatType) { + self.outer_attrs(&pat.attrs); + self.pat(&pat.pat); + self.word(": "); + self.ty(&pat.ty); + } + + #[cfg(not(feature = "verbatim"))] + fn pat_verbatim(&mut self, pat: &TokenStream) { + unimplemented!("Pat::Verbatim `{}`", pat); + } + + #[cfg(feature = "verbatim")] + fn pat_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{braced, Attribute, Block, Token}; + + enum PatVerbatim { + Box(Pat), + Const(PatConst), + } + + struct PatConst { + attrs: Vec, + block: Block, + } + + impl Parse for PatVerbatim { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![box]) { + input.parse::()?; + let inner = Pat::parse_single(input)?; + Ok(PatVerbatim::Box(inner)) + } else if lookahead.peek(Token![const]) { + 
input.parse::()?; + let content; + let brace_token = braced!(content in input); + let attrs = content.call(Attribute::parse_inner)?; + let stmts = content.call(Block::parse_within)?; + Ok(PatVerbatim::Const(PatConst { + attrs, + block: Block { brace_token, stmts }, + })) + } else { + Err(lookahead.error()) + } + } + } + + let pat: PatVerbatim = match syn::parse2(tokens.clone()) { + Ok(pat) => pat, + Err(_) => unimplemented!("Pat::Verbatim `{}`", tokens), + }; + + match pat { + PatVerbatim::Box(pat) => { + self.word("box "); + self.pat(&pat); + } + PatVerbatim::Const(pat) => { + self.word("const "); + self.cbox(INDENT); + self.small_block(&pat.block, &pat.attrs); + self.end(); + } + } + } + + fn pat_wild(&mut self, pat: &PatWild) { + self.outer_attrs(&pat.attrs); + self.word("_"); + } + + fn field_pat(&mut self, field_pat: &FieldPat) { + self.outer_attrs(&field_pat.attrs); + if field_pat.colon_token.is_some() { + self.member(&field_pat.member); + self.word(": "); + } + self.pat(&field_pat.pat); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ring.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ring.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ring.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ring.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,81 @@ +use std::collections::VecDeque; +use std::ops::{Index, IndexMut}; + +pub struct RingBuffer { + data: VecDeque, + // Abstract index of data[0] in infinitely sized queue + offset: usize, +} + +impl RingBuffer { + pub fn new() -> Self { + RingBuffer { + data: VecDeque::new(), + offset: 0, + } + } + + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } + + pub fn len(&self) -> usize { + self.data.len() + } + + pub fn push(&mut self, value: T) -> usize { + let index = self.offset + self.data.len(); + self.data.push_back(value); + index + } + + pub fn clear(&mut self) { + 
self.data.clear(); + } + + pub fn index_of_first(&self) -> usize { + self.offset + } + + pub fn first(&self) -> &T { + &self.data[0] + } + + pub fn first_mut(&mut self) -> &mut T { + &mut self.data[0] + } + + pub fn pop_first(&mut self) -> T { + self.offset += 1; + self.data.pop_front().unwrap() + } + + pub fn last(&self) -> &T { + self.data.back().unwrap() + } + + pub fn last_mut(&mut self) -> &mut T { + self.data.back_mut().unwrap() + } + + pub fn second_last(&self) -> &T { + &self.data[self.data.len() - 2] + } + + pub fn pop_last(&mut self) { + self.data.pop_back().unwrap(); + } +} + +impl Index for RingBuffer { + type Output = T; + fn index(&self, index: usize) -> &Self::Output { + &self.data[index.checked_sub(self.offset).unwrap()] + } +} + +impl IndexMut for RingBuffer { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.data[index.checked_sub(self.offset).unwrap()] + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/stmt.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/stmt.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/stmt.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/stmt.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,209 @@ +use crate::algorithm::Printer; +use crate::INDENT; +use syn::{BinOp, Expr, Stmt}; + +impl Printer { + pub fn stmt(&mut self, stmt: &Stmt) { + match stmt { + Stmt::Local(local) => { + self.outer_attrs(&local.attrs); + self.ibox(0); + self.word("let "); + self.pat(&local.pat); + if let Some(local_init) = &local.init { + self.word(" = "); + self.neverbreak(); + self.expr(&local_init.expr); + if let Some((_else, diverge)) = &local_init.diverge { + self.word(" else "); + if let Expr::Block(expr) = diverge.as_ref() { + self.small_block(&expr.block, &[]); + } else { + self.word("{"); + self.space(); + self.ibox(INDENT); + self.expr(diverge); + self.end(); + self.space(); + 
self.offset(-INDENT); + self.word("}"); + } + } + } + self.word(";"); + self.end(); + self.hardbreak(); + } + Stmt::Item(item) => self.item(item), + Stmt::Expr(expr, None) => { + if break_after(expr) { + self.ibox(0); + self.expr_beginning_of_line(expr, true); + if add_semi(expr) { + self.word(";"); + } + self.end(); + self.hardbreak(); + } else { + self.expr_beginning_of_line(expr, true); + } + } + Stmt::Expr(expr, Some(_semi)) => { + if let Expr::Verbatim(tokens) = expr { + if tokens.is_empty() { + return; + } + } + self.ibox(0); + self.expr_beginning_of_line(expr, true); + if !remove_semi(expr) { + self.word(";"); + } + self.end(); + self.hardbreak(); + } + Stmt::Macro(stmt) => { + self.outer_attrs(&stmt.attrs); + self.mac(&stmt.mac, None); + self.mac_semi_if_needed(&stmt.mac.delimiter); + self.hardbreak(); + } + } + } +} + +pub fn add_semi(expr: &Expr) -> bool { + match expr { + Expr::Assign(_) | Expr::Break(_) | Expr::Continue(_) | Expr::Return(_) | Expr::Yield(_) => { + true + } + Expr::Binary(expr) => match expr.op { + BinOp::AddAssign(_) + | BinOp::SubAssign(_) + | BinOp::MulAssign(_) + | BinOp::DivAssign(_) + | BinOp::RemAssign(_) + | BinOp::BitXorAssign(_) + | BinOp::BitAndAssign(_) + | BinOp::BitOrAssign(_) + | BinOp::ShlAssign(_) + | BinOp::ShrAssign(_) => true, + BinOp::Add(_) + | BinOp::Sub(_) + | BinOp::Mul(_) + | BinOp::Div(_) + | BinOp::Rem(_) + | BinOp::And(_) + | BinOp::Or(_) + | BinOp::BitXor(_) + | BinOp::BitAnd(_) + | BinOp::BitOr(_) + | BinOp::Shl(_) + | BinOp::Shr(_) + | BinOp::Eq(_) + | BinOp::Lt(_) + | BinOp::Le(_) + | BinOp::Ne(_) + | BinOp::Ge(_) + | BinOp::Gt(_) => false, + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown BinOp"), + }, + Expr::Group(group) => add_semi(&group.expr), + + Expr::Array(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Block(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | 
Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) => false, + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => false, + } +} + +pub fn break_after(expr: &Expr) -> bool { + if let Expr::Group(group) = expr { + if let Expr::Verbatim(verbatim) = group.expr.as_ref() { + return !verbatim.is_empty(); + } + } + true +} + +fn remove_semi(expr: &Expr) -> bool { + match expr { + Expr::ForLoop(_) | Expr::While(_) => true, + Expr::Group(group) => remove_semi(&group.expr), + Expr::If(expr) => match &expr.else_branch { + Some((_else_token, else_branch)) => remove_semi(else_branch), + None => true, + }, + + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Block(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Continue(_) + | Expr::Const(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::Yield(_) => false, + + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => false, + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/token.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/token.rs --- 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/token.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/token.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,80 @@ +use crate::algorithm::Printer; +use proc_macro2::{Delimiter, Ident, Literal, Spacing, TokenStream, TokenTree}; + +impl Printer { + pub fn single_token(&mut self, token: Token, group_contents: fn(&mut Self, TokenStream)) { + match token { + Token::Group(delimiter, stream) => self.token_group(delimiter, stream, group_contents), + Token::Ident(ident) => self.ident(&ident), + Token::Punct(ch, _spacing) => self.token_punct(ch), + Token::Literal(literal) => self.token_literal(&literal), + } + } + + fn token_group( + &mut self, + delimiter: Delimiter, + stream: TokenStream, + group_contents: fn(&mut Self, TokenStream), + ) { + self.delimiter_open(delimiter); + if !stream.is_empty() { + if delimiter == Delimiter::Brace { + self.space(); + } + group_contents(self, stream); + if delimiter == Delimiter::Brace { + self.space(); + } + } + self.delimiter_close(delimiter); + } + + pub fn ident(&mut self, ident: &Ident) { + self.word(ident.to_string()); + } + + pub fn token_punct(&mut self, ch: char) { + self.word(ch.to_string()); + } + + pub fn token_literal(&mut self, literal: &Literal) { + self.word(literal.to_string()); + } + + pub fn delimiter_open(&mut self, delimiter: Delimiter) { + self.word(match delimiter { + Delimiter::Parenthesis => "(", + Delimiter::Brace => "{", + Delimiter::Bracket => "[", + Delimiter::None => return, + }); + } + + pub fn delimiter_close(&mut self, delimiter: Delimiter) { + self.word(match delimiter { + Delimiter::Parenthesis => ")", + Delimiter::Brace => "}", + Delimiter::Bracket => "]", + Delimiter::None => return, + }); + } +} + +pub enum Token { + Group(Delimiter, TokenStream), + Ident(Ident), + Punct(char, Spacing), + Literal(Literal), +} + +impl From for Token { + fn from(tt: TokenTree) -> Self { + match tt 
{ + TokenTree::Group(group) => Token::Group(group.delimiter(), group.stream()), + TokenTree::Ident(ident) => Token::Ident(ident), + TokenTree::Punct(punct) => Token::Punct(punct.as_char(), punct.spacing()), + TokenTree::Literal(literal) => Token::Literal(literal), + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ty.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ty.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ty.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/prettyplease/src/ty.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,286 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::{ + Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, + TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, + TypeSlice, TypeTraitObject, TypeTuple, +}; + +impl Printer { + pub fn ty(&mut self, ty: &Type) { + match ty { + Type::Array(ty) => self.type_array(ty), + Type::BareFn(ty) => self.type_bare_fn(ty), + Type::Group(ty) => self.type_group(ty), + Type::ImplTrait(ty) => self.type_impl_trait(ty), + Type::Infer(ty) => self.type_infer(ty), + Type::Macro(ty) => self.type_macro(ty), + Type::Never(ty) => self.type_never(ty), + Type::Paren(ty) => self.type_paren(ty), + Type::Path(ty) => self.type_path(ty), + Type::Ptr(ty) => self.type_ptr(ty), + Type::Reference(ty) => self.type_reference(ty), + Type::Slice(ty) => self.type_slice(ty), + Type::TraitObject(ty) => self.type_trait_object(ty), + Type::Tuple(ty) => self.type_tuple(ty), + Type::Verbatim(ty) => self.type_verbatim(ty), + #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + _ => unimplemented!("unknown Type"), + } + } + + fn type_array(&mut self, ty: &TypeArray) { + self.word("["); + self.ty(&ty.elem); + self.word("; 
"); + self.expr(&ty.len); + self.word("]"); + } + + fn type_bare_fn(&mut self, ty: &TypeBareFn) { + if let Some(bound_lifetimes) = &ty.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + if ty.unsafety.is_some() { + self.word("unsafe "); + } + if let Some(abi) = &ty.abi { + self.abi(abi); + } + self.word("fn("); + self.cbox(INDENT); + self.zerobreak(); + for bare_fn_arg in ty.inputs.iter().delimited() { + self.bare_fn_arg(&bare_fn_arg); + self.trailing_comma(bare_fn_arg.is_last && ty.variadic.is_none()); + } + if let Some(variadic) = &ty.variadic { + self.bare_variadic(variadic); + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + self.return_type(&ty.output); + } + + fn type_group(&mut self, ty: &TypeGroup) { + self.ty(&ty.elem); + } + + fn type_impl_trait(&mut self, ty: &TypeImplTrait) { + self.word("impl "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + + fn type_infer(&mut self, ty: &TypeInfer) { + let _ = ty; + self.word("_"); + } + + fn type_macro(&mut self, ty: &TypeMacro) { + self.mac(&ty.mac, None); + } + + fn type_never(&mut self, ty: &TypeNever) { + let _ = ty; + self.word("!"); + } + + fn type_paren(&mut self, ty: &TypeParen) { + self.word("("); + self.ty(&ty.elem); + self.word(")"); + } + + fn type_path(&mut self, ty: &TypePath) { + self.qpath(&ty.qself, &ty.path, PathKind::Type); + } + + fn type_ptr(&mut self, ty: &TypePtr) { + self.word("*"); + if ty.mutability.is_some() { + self.word("mut "); + } else { + self.word("const "); + } + self.ty(&ty.elem); + } + + fn type_reference(&mut self, ty: &TypeReference) { + self.word("&"); + if let Some(lifetime) = &ty.lifetime { + self.lifetime(lifetime); + self.nbsp(); + } + if ty.mutability.is_some() { + self.word("mut "); + } + self.ty(&ty.elem); + } + + fn type_slice(&mut self, ty: &TypeSlice) { + self.word("["); + self.ty(&ty.elem); + self.word("]"); 
+ } + + fn type_trait_object(&mut self, ty: &TypeTraitObject) { + self.word("dyn "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + + fn type_tuple(&mut self, ty: &TypeTuple) { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in ty.elems.iter().delimited() { + self.ty(&elem); + if ty.elems.len() == 1 { + self.word(","); + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + #[cfg(not(feature = "verbatim"))] + fn type_verbatim(&mut self, ty: &TokenStream) { + unimplemented!("Type::Verbatim `{}`", ty); + } + + #[cfg(feature = "verbatim")] + fn type_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::punctuated::Punctuated; + use syn::{Token, TypeParamBound}; + + enum TypeVerbatim { + DynStar(DynStar), + MutSelf(MutSelf), + NotType(NotType), + } + + struct DynStar { + bounds: Punctuated, + } + + struct MutSelf { + ty: Option, + } + + struct NotType { + inner: Type, + } + + impl Parse for TypeVerbatim { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![dyn]) { + input.parse::()?; + input.parse::()?; + let bounds = input.parse_terminated(TypeParamBound::parse, Token![+])?; + Ok(TypeVerbatim::DynStar(DynStar { bounds })) + } else if lookahead.peek(Token![mut]) { + input.parse::()?; + input.parse::()?; + let ty = if input.is_empty() { + None + } else { + input.parse::()?; + let ty: Type = input.parse()?; + Some(ty) + }; + Ok(TypeVerbatim::MutSelf(MutSelf { ty })) + } else if lookahead.peek(Token![!]) { + input.parse::()?; + let inner: Type = input.parse()?; + Ok(TypeVerbatim::NotType(NotType { inner })) + } else { + Err(lookahead.error()) + } + } + } + + let ty: TypeVerbatim = match syn::parse2(tokens.clone()) { + Ok(ty) => ty, + Err(_) => 
unimplemented!("Type::Verbatim `{}`", tokens), + }; + + match ty { + TypeVerbatim::DynStar(ty) => { + self.word("dyn* "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + TypeVerbatim::MutSelf(bare_fn_arg) => { + self.word("mut self"); + if let Some(ty) = &bare_fn_arg.ty { + self.word(": "); + self.ty(ty); + } + } + TypeVerbatim::NotType(ty) => { + self.word("!"); + self.ty(&ty.inner); + } + } + } + + pub fn return_type(&mut self, ty: &ReturnType) { + match ty { + ReturnType::Default => {} + ReturnType::Type(_arrow, ty) => { + self.word(" -> "); + self.ty(ty); + } + } + } + + fn bare_fn_arg(&mut self, bare_fn_arg: &BareFnArg) { + self.outer_attrs(&bare_fn_arg.attrs); + if let Some((name, _colon)) = &bare_fn_arg.name { + self.ident(name); + self.word(": "); + } + self.ty(&bare_fn_arg.ty); + } + + fn bare_variadic(&mut self, variadic: &BareVariadic) { + self.outer_attrs(&variadic.attrs); + if let Some((name, _colon)) = &variadic.name { + self.ident(name); + self.word(": "); + } + self.word("..."); + } + + pub fn abi(&mut self, abi: &Abi) { + self.word("extern "); + if let Some(name) = &abi.name { + self.lit_str(name); + self.nbsp(); + } + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/build.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/build.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/build.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/build.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,11 +1,5 @@ // rustc-cfg emitted by the build script: // -// "use_proc_macro" -// Link to extern crate proc_macro. Available on any compiler and any target -// except wasm32. Requires "proc-macro" Cargo cfg to be enabled (default is -// enabled). On wasm32 we never link to proc_macro even if "proc-macro" cfg -// is enabled. 
-// // "wrap_proc_macro" // Wrap types from libproc_macro rather than polyfilling the whole API. // Enabled on rustc 1.29+ as long as procmacro2_semver_exempt is not set, @@ -41,21 +35,17 @@ // 1.57+. use std::env; -use std::process::{self, Command}; +use std::process::Command; use std::str; +use std::u32; fn main() { println!("cargo:rerun-if-changed=build.rs"); - let version = match rustc_version() { - Some(version) => version, - None => return, - }; - - if version.minor < 31 { - eprintln!("Minimum supported rustc version is 1.31"); - process::exit(1); - } + let version = rustc_version().unwrap_or(RustcVersion { + minor: u32::MAX, + nightly: false, + }); let docs_rs = env::var_os("DOCS_RS").is_some(); let semver_exempt = cfg!(procmacro2_semver_exempt) || docs_rs; @@ -68,53 +58,23 @@ println!("cargo:rustc-cfg=span_locations"); } - if version.minor < 32 { - println!("cargo:rustc-cfg=no_libprocmacro_unwind_safe"); - } - - if version.minor < 39 { - println!("cargo:rustc-cfg=no_bind_by_move_pattern_guard"); - } - - if version.minor < 44 { - println!("cargo:rustc-cfg=no_lexerror_display"); - } - - if version.minor < 45 { - println!("cargo:rustc-cfg=no_hygiene"); - } - - if version.minor < 47 { - println!("cargo:rustc-cfg=no_ident_new_raw"); - } - - if version.minor < 54 { - println!("cargo:rustc-cfg=no_literal_from_str"); - } - - if version.minor < 55 { - println!("cargo:rustc-cfg=no_group_open_close"); - } - if version.minor < 57 { println!("cargo:rustc-cfg=no_is_available"); } - let target = env::var("TARGET").unwrap(); - if !enable_use_proc_macro(&target) { - return; + if version.minor < 66 { + println!("cargo:rustc-cfg=no_source_text"); } - println!("cargo:rustc-cfg=use_proc_macro"); + if !cfg!(feature = "proc-macro") { + return; + } if version.nightly || !semver_exempt { println!("cargo:rustc-cfg=wrap_proc_macro"); } - if version.nightly - && feature_allowed("proc_macro_span") - && feature_allowed("proc_macro_span_shrink") - { + if version.nightly && 
feature_allowed("proc_macro_span") { println!("cargo:rustc-cfg=proc_macro_span"); } @@ -123,16 +83,6 @@ } } -fn enable_use_proc_macro(target: &str) -> bool { - // wasm targets don't have the `proc_macro` crate, disable this feature. - if target.contains("wasm32") { - return false; - } - - // Otherwise, only enable it if our feature is actually enabled. - cfg!(feature = "proc-macro") -} - struct RustcVersion { minor: u32, nightly: bool, diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/.cargo-checksum.json 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/.cargo-checksum.json 2023-08-15 22:24:19.000000000 +0000 @@ -1 +1 @@ -{"files":{"Cargo.toml":"7d4723ca4eea6d781e7e67c85a4f3537723b89e6c8e1b843e9f3a090d6c02d00","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"32cbd395594db59ecc43d7866cfa2663f3687bb7df631781d60ae83200dae8a8","build.rs":"275f7a9ee0b9eff972124951de544ae17ee3e698a4e89b0f0393b334344f5e30","src/detection.rs":"ed9a5f9a979ab01247d7a68eeb1afa3c13209334c5bfff0f9289cb07e5bb4e8b","src/fallback.rs":"b0b98566421529c309f4310565f24fa27608078c03c84e07a65f5aa1c0df2ae8","src/lib.rs":"4ba3c39bb516c6acbcfc5cfb45888ca79c93953768ac6a45bb2fb6f342f24874","src/location.rs":"f55d2e61f1bb1af65e14ed04c9e91eb1ddbf8430e8c05f2048d1cd538d27368e","src/marker.rs":"344a8394f06a1d43355b514920e7e3c0c6dce507be767e3a590bbe3552edd110","src/parse.rs":"06bd29cf594bb5d5cfff9b3371ce6a3367a13788354135e51e8b5ff195d06481","src/rcvec.rs":"49b6784c6ca5f32573cd8a83758b485d8acbfa126e5fb516ae439e429ef4c144","src/wrapper.rs":"6932058819a5c31c1765e6294f1a8279ab4ea1807de21a488c869fdfe13bf9d5","tests/comments.rs":"31115b3a56c83d93eef2fb4c9566bf4543e30
2560732986161b98aef504785ed","tests/features.rs":"a86deb8644992a4eb64d9fd493eff16f9cf9c5cb6ade3a634ce0c990cf87d559","tests/marker.rs":"cb6d776eba6a238d726b0f531883adf41957e06f2717ee8a069821c81e7081d6","tests/test.rs":"cf3c944f1c4a09c326b1e639f70c173f0d93d916fb50c085542e44fad691eea7","tests/test_fmt.rs":"9357769945784354909259084ec8b34d2aa52081dd3967cac6dae3a5e3df3bc0"},"package":"5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"} \ No newline at end of file +{"files":{"Cargo.toml":"a60eadb453036fc5f0313f0a7bf0c973609209b3c9de07a166581cd3615302fe","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"690809ae2c2aa28e479bc38a7c296924b9a4c8b6a689c7b62a4f33ae834ffca9","build.rs":"6b0b19a3af5248513b186b9c28c133f5af34a1d11122c0268c68e89724aa40fa","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/detection.rs":"ed9a5f9a979ab01247d7a68eeb1afa3c13209334c5bfff0f9289cb07e5bb4e8b","src/extra.rs":"d378a9e799e5c49933b067cd38f5364d16a152ef337eef86ce42fdc86005ddf3","src/fallback.rs":"2d3be80c4df730c2400f9b4fa953640c78b7701ac09823c0a007bfe0343dbc1e","src/lib.rs":"fb19025556ba827c8cc5dc4fe4840137e1ffe51476ead385e2b99d7939a1d733","src/location.rs":"f55d2e61f1bb1af65e14ed04c9e91eb1ddbf8430e8c05f2048d1cd538d27368e","src/marker.rs":"43f5a18f5059f1a16507c047b3b7387afee7f25ac45ba4eb1621ca7fa733eb01","src/parse.rs":"08af3478a0d9e5252c2e7a4968cb2837d3b8d23606cf67c662eb9e008c9e1cee","src/rcvec.rs":"1c3c48c4f819927cc445ae15ca3bb06775feff2fd1cb21901ae4c40c7e6b4e82","src/wrapper.rs":"06624150b94f4fd9ada30b2c9ad6936ea695d05c2138ceb14f2304e757133d52","tests/comments.rs":"31115b3a56c83d93eef2fb4c9566bf4543e302560732986161b98aef504785ed","tests/features.rs":"a86deb8644992a4eb64d9fd493eff16f9cf9c5cb6ade3a634ce0c990cf87d559","tests/marker.rs":"3190ee07dae510251f360db701ce257030f94a479b6689c3a9ef804bd5d8d099","tests/test.
rs":"b6190b112f4fc15916d6ee6c9b51bbeff825e5fc807b37fd21cc1b119dd76a24","tests/test_fmt.rs":"b7743b612af65f2c88cbe109d50a093db7aa7e87f9e37bf45b7bbaeb240aa020","tests/test_size.rs":"acf05963c1e62052d769d237b50844a2c59b4182b491231b099a4f74e5456ab0"},"package":"18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/Cargo.toml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/Cargo.toml 2023-08-15 22:24:19.000000000 +0000 @@ -10,10 +10,10 @@ # See Cargo.toml.orig for the original contents. [package] -edition = "2018" -rust-version = "1.31" +edition = "2021" +rust-version = "1.56" name = "proc-macro2" -version = "1.0.51" +version = "1.0.66" authors = [ "David Tolnay ", "Alex Crichton ", @@ -40,6 +40,7 @@ "procmacro2_semver_exempt", "--cfg", "doc_cfg", + "--generate-link-to-definition", ] targets = ["x86_64-unknown-linux-gnu"] @@ -56,6 +57,9 @@ version = "1.0" default_features = false +[dev-dependencies.rustversion] +version = "1" + [features] default = ["proc-macro"] nightly = [] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/README.md 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/README.md 2023-08-15 22:24:19.000000000 +0000 @@ -62,7 +62,7 @@ To opt into the additional APIs available in the most recent nightly compiler, the `procmacro2_semver_exempt` config flag must be passed to rustc. We will -polyfill those nightly-only APIs back to Rust 1.31.0. As these are unstable APIs +polyfill those nightly-only APIs back to Rust 1.56.0. 
As these are unstable APIs that track the nightly compiler, minor versions of proc-macro2 may make breaking changes to them at any time. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/rust-toolchain.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/rust-toolchain.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/rust-toolchain.toml 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/rust-toolchain.toml 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,2 @@ +[toolchain] +components = ["rust-src"] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/extra.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/extra.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/extra.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/extra.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,84 @@ +//! Items which do not have a correspondence to any API in the proc_macro crate, +//! but are necessary to include in proc-macro2. + +use crate::fallback; +use crate::imp; +use crate::marker::Marker; +use crate::Span; +use core::fmt::{self, Debug}; + +/// An object that holds a [`Group`]'s `span_open()` and `span_close()` together +/// (in a more compact representation than holding those 2 spans individually. 
+/// +/// [`Group`]: crate::Group +#[derive(Copy, Clone)] +pub struct DelimSpan { + inner: DelimSpanEnum, + _marker: Marker, +} + +#[derive(Copy, Clone)] +enum DelimSpanEnum { + #[cfg(wrap_proc_macro)] + Compiler { + join: proc_macro::Span, + open: proc_macro::Span, + close: proc_macro::Span, + }, + Fallback(fallback::Span), +} + +impl DelimSpan { + pub(crate) fn new(group: &imp::Group) -> Self { + #[cfg(wrap_proc_macro)] + let inner = match group { + imp::Group::Compiler(group) => DelimSpanEnum::Compiler { + join: group.span(), + open: group.span_open(), + close: group.span_close(), + }, + imp::Group::Fallback(group) => DelimSpanEnum::Fallback(group.span()), + }; + + #[cfg(not(wrap_proc_macro))] + let inner = DelimSpanEnum::Fallback(group.span()); + + DelimSpan { + inner, + _marker: Marker, + } + } + + /// Returns a span covering the entire delimited group. + pub fn join(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { join, .. } => Span::_new(imp::Span::Compiler(*join)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(*span), + } + } + + /// Returns a span for the opening punctuation of the group only. + pub fn open(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { open, .. } => Span::_new(imp::Span::Compiler(*open)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.first_byte()), + } + } + + /// Returns a span for the closing punctuation of the group only. + pub fn close(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { close, .. 
} => Span::_new(imp::Span::Compiler(*close)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.last_byte()), + } + } +} + +impl Debug for DelimSpan { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.join(), f) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/fallback.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/fallback.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/fallback.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/fallback.rs 2023-08-15 22:24:19.000000000 +0000 @@ -3,18 +3,15 @@ use crate::parse::{self, Cursor}; use crate::rcvec::{RcVec, RcVecBuilder, RcVecIntoIter, RcVecMut}; use crate::{Delimiter, Spacing, TokenTree}; -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] use core::cell::RefCell; #[cfg(span_locations)] use core::cmp; use core::fmt::{self, Debug, Display, Write}; -use core::iter::FromIterator; use core::mem::ManuallyDrop; use core::ops::RangeBounds; use core::ptr; use core::str::FromStr; -#[cfg(procmacro2_semver_exempt)] -use std::path::Path; use std::path::PathBuf; /// Force use of proc-macro2's fallback implementation of the API for now, even @@ -73,7 +70,6 @@ fn push_token_from_proc_macro(mut vec: RcVecMut, token: TokenTree) { // https://github.com/dtolnay/proc-macro2/issues/235 match token { - #[cfg(not(no_bind_by_move_pattern_guard))] TokenTree::Literal(crate::Literal { #[cfg(wrap_proc_macro)] inner: crate::imp::Literal::Fallback(literal), @@ -83,20 +79,6 @@ }) if literal.repr.starts_with('-') => { push_negative_literal(vec, literal); } - #[cfg(no_bind_by_move_pattern_guard)] - TokenTree::Literal(crate::Literal { - #[cfg(wrap_proc_macro)] - inner: crate::imp::Literal::Fallback(literal), - #[cfg(not(wrap_proc_macro))] - inner: literal, - .. 
- }) => { - if literal.repr.starts_with('-') { - push_negative_literal(vec, literal); - } else { - vec.push(TokenTree::Literal(crate::Literal::_new_stable(literal))); - } - } _ => vec.push(token), } @@ -104,9 +86,9 @@ fn push_negative_literal(mut vec: RcVecMut, mut literal: Literal) { literal.repr.remove(0); let mut punct = crate::Punct::new('-', Spacing::Alone); - punct.set_span(crate::Span::_new_stable(literal.span)); + punct.set_span(crate::Span::_new_fallback(literal.span)); vec.push(TokenTree::Punct(punct)); - vec.push(TokenTree::Literal(crate::Literal::_new_stable(literal))); + vec.push(TokenTree::Literal(crate::Literal::_new_fallback(literal))); } } @@ -162,11 +144,14 @@ #[cfg(span_locations)] fn get_cursor(src: &str) -> Cursor { + #[cfg(fuzzing)] + return Cursor { rest: src, off: 1 }; + // Create a dummy file & add it to the source map + #[cfg(not(fuzzing))] SOURCE_MAP.with(|cm| { let mut cm = cm.borrow_mut(); - let name = format!("", cm.files.len()); - let span = cm.add_file(&name, src); + let span = cm.add_file(src); Cursor { rest: src, off: span.lo, @@ -232,7 +217,7 @@ } } -#[cfg(use_proc_macro)] +#[cfg(feature = "proc-macro")] impl From for TokenStream { fn from(inner: proc_macro::TokenStream) -> Self { inner @@ -242,7 +227,7 @@ } } -#[cfg(use_proc_macro)] +#[cfg(feature = "proc-macro")] impl From for proc_macro::TokenStream { fn from(inner: TokenStream) -> Self { inner @@ -334,29 +319,27 @@ } } -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] thread_local! { static SOURCE_MAP: RefCell = RefCell::new(SourceMap { // NOTE: We start with a single dummy file which all call_site() and // def_site() spans reference. 
files: vec![FileInfo { - #[cfg(procmacro2_semver_exempt)] - name: "".to_owned(), + source_text: String::new(), span: Span { lo: 0, hi: 0 }, lines: vec![0], }], }); } -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] struct FileInfo { - #[cfg(procmacro2_semver_exempt)] - name: String, + source_text: String, span: Span, lines: Vec, } -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] impl FileInfo { fn offset_line_column(&self, offset: usize) -> LineColumn { assert!(self.span_within(Span { @@ -379,11 +362,17 @@ fn span_within(&self, span: Span) -> bool { span.lo >= self.span.lo && span.hi <= self.span.hi } + + fn source_text(&self, span: Span) -> String { + let lo = (span.lo - self.span.lo) as usize; + let hi = (span.hi - self.span.lo) as usize; + self.source_text[lo..hi].to_owned() + } } /// Computes the offsets of each line in the given source string /// and the total number of characters -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] fn lines_offsets(s: &str) -> (usize, Vec) { let mut lines = vec![0]; let mut total = 0; @@ -398,12 +387,12 @@ (total, lines) } -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] struct SourceMap { files: Vec, } -#[cfg(span_locations)] +#[cfg(all(span_locations, not(fuzzing)))] impl SourceMap { fn next_start_pos(&self) -> u32 { // Add 1 so there's always space between files. @@ -413,7 +402,7 @@ self.files.last().unwrap().span.hi + 1 } - fn add_file(&mut self, name: &str, src: &str) -> Span { + fn add_file(&mut self, src: &str) -> Span { let (len, lines) = lines_offsets(src); let lo = self.next_start_pos(); // XXX(nika): Should we bother doing a checked cast or checked add here? 
@@ -423,25 +412,35 @@ }; self.files.push(FileInfo { - #[cfg(procmacro2_semver_exempt)] - name: name.to_owned(), + source_text: src.to_owned(), span, lines, }); - #[cfg(not(procmacro2_semver_exempt))] - let _ = name; - span } + #[cfg(procmacro2_semver_exempt)] + fn filepath(&self, span: Span) -> PathBuf { + for (i, file) in self.files.iter().enumerate() { + if file.span_within(span) { + return PathBuf::from(if i == 0 { + "".to_owned() + } else { + format!("", i) + }); + } + } + unreachable!("Invalid span with no related FileInfo!"); + } + fn fileinfo(&self, span: Span) -> &FileInfo { for file in &self.files { if file.span_within(span) { return file; } } - panic!("Invalid span with no related FileInfo!"); + unreachable!("Invalid span with no related FileInfo!"); } } @@ -464,7 +463,6 @@ Span { lo: 0, hi: 0 } } - #[cfg(not(no_hygiene))] pub fn mixed_site() -> Self { Span::call_site() } @@ -487,17 +485,25 @@ #[cfg(procmacro2_semver_exempt)] pub fn source_file(&self) -> SourceFile { + #[cfg(fuzzing)] + return SourceFile { + path: PathBuf::from(""), + }; + + #[cfg(not(fuzzing))] SOURCE_MAP.with(|cm| { let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - SourceFile { - path: Path::new(&fi.name).to_owned(), - } + let path = cm.filepath(*self); + SourceFile { path } }) } #[cfg(span_locations)] pub fn start(&self) -> LineColumn { + #[cfg(fuzzing)] + return LineColumn { line: 0, column: 0 }; + + #[cfg(not(fuzzing))] SOURCE_MAP.with(|cm| { let cm = cm.borrow(); let fi = cm.fileinfo(*self); @@ -507,6 +513,10 @@ #[cfg(span_locations)] pub fn end(&self) -> LineColumn { + #[cfg(fuzzing)] + return LineColumn { line: 0, column: 0 }; + + #[cfg(not(fuzzing))] SOURCE_MAP.with(|cm| { let cm = cm.borrow(); let fi = cm.fileinfo(*self); @@ -514,26 +524,6 @@ }) } - #[cfg(procmacro2_semver_exempt)] - pub fn before(&self) -> Span { - Span { - #[cfg(span_locations)] - lo: self.lo, - #[cfg(span_locations)] - hi: self.lo, - } - } - - #[cfg(procmacro2_semver_exempt)] - pub fn after(&self) -> 
Span { - Span { - #[cfg(span_locations)] - lo: self.hi, - #[cfg(span_locations)] - hi: self.hi, - } - } - #[cfg(not(span_locations))] pub fn join(&self, _other: Span) -> Option { Some(Span {}) @@ -541,6 +531,13 @@ #[cfg(span_locations)] pub fn join(&self, other: Span) -> Option { + #[cfg(fuzzing)] + return { + let _ = other; + None + }; + + #[cfg(not(fuzzing))] SOURCE_MAP.with(|cm| { let cm = cm.borrow(); // If `other` is not within the same FileInfo as us, return None. @@ -555,12 +552,32 @@ } #[cfg(not(span_locations))] - fn first_byte(self) -> Self { + pub fn source_text(&self) -> Option { + None + } + + #[cfg(span_locations)] + pub fn source_text(&self) -> Option { + #[cfg(fuzzing)] + return None; + + #[cfg(not(fuzzing))] + { + if self.is_call_site() { + None + } else { + Some(SOURCE_MAP.with(|cm| cm.borrow().fileinfo(*self).source_text(*self))) + } + } + } + + #[cfg(not(span_locations))] + pub(crate) fn first_byte(self) -> Self { self } #[cfg(span_locations)] - fn first_byte(self) -> Self { + pub(crate) fn first_byte(self) -> Self { Span { lo: self.lo, hi: cmp::min(self.lo.saturating_add(1), self.hi), @@ -568,17 +585,22 @@ } #[cfg(not(span_locations))] - fn last_byte(self) -> Self { + pub(crate) fn last_byte(self) -> Self { self } #[cfg(span_locations)] - fn last_byte(self) -> Self { + pub(crate) fn last_byte(self) -> Self { Span { lo: cmp::max(self.hi.saturating_sub(1), self.lo), hi: self.hi, } } + + #[cfg(span_locations)] + fn is_call_site(&self) -> bool { + self.lo == 0 && self.hi == 0 + } } impl Debug for Span { @@ -594,7 +616,7 @@ pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { #[cfg(span_locations)] { - if span.lo == 0 && span.hi == 0 { + if span.is_call_site() { return; } } @@ -730,7 +752,7 @@ panic!("Ident is not allowed to be empty; use Option"); } - if string.bytes().all(|digit| digit >= b'0' && digit <= b'9') { + if string.bytes().all(|digit| b'0' <= digit && digit <= b'9') { panic!("Ident cannot be a number; 
use Literal instead"); } @@ -791,6 +813,7 @@ } } +#[allow(clippy::missing_fields_in_debug)] impl Debug for Ident { // Ident(proc_macro), Ident(r#union) #[cfg(not(span_locations))] @@ -899,12 +922,25 @@ pub fn string(t: &str) -> Literal { let mut repr = String::with_capacity(t.len() + 2); repr.push('"'); - for c in t.chars() { - if c == '\'' { + let mut chars = t.chars(); + while let Some(ch) = chars.next() { + if ch == '\0' { + repr.push_str( + if chars + .as_str() + .starts_with(|next| '0' <= next && next <= '7') + { + // circumvent clippy::octal_escapes lint + "\\x00" + } else { + "\\0" + }, + ); + } else if ch == '\'' { // escape_debug turns this into "\'" which is unnecessary. - repr.push(c); + repr.push(ch); } else { - repr.extend(c.escape_debug()); + repr.extend(ch.escape_debug()); } } repr.push('"'); @@ -926,16 +962,21 @@ pub fn byte_string(bytes: &[u8]) -> Literal { let mut escaped = "b\"".to_string(); - for b in bytes { + let mut bytes = bytes.iter(); + while let Some(&b) = bytes.next() { #[allow(clippy::match_overlapping_arm)] - match *b { - b'\0' => escaped.push_str(r"\0"), + match b { + b'\0' => escaped.push_str(match bytes.as_slice().first() { + // circumvent clippy::octal_escapes lint + Some(b'0'..=b'7') => r"\x00", + _ => r"\0", + }), b'\t' => escaped.push_str(r"\t"), b'\n' => escaped.push_str(r"\n"), b'\r' => escaped.push_str(r"\r"), b'"' => escaped.push_str("\\\""), b'\\' => escaped.push_str("\\\\"), - b'\x20'..=b'\x7E' => escaped.push(*b as char), + b'\x20'..=b'\x7E' => escaped.push(b as char), _ => { let _ = write!(escaped, "\\x{:02X}", b); } @@ -953,28 +994,75 @@ self.span = span; } - pub fn subspan>(&self, _range: R) -> Option { - None + pub fn subspan>(&self, range: R) -> Option { + #[cfg(not(span_locations))] + { + let _ = range; + None + } + + #[cfg(span_locations)] + { + use core::ops::Bound; + + let lo = match range.start_bound() { + Bound::Included(start) => { + let start = u32::try_from(*start).ok()?; + self.span.lo.checked_add(start)? 
+ } + Bound::Excluded(start) => { + let start = u32::try_from(*start).ok()?; + self.span.lo.checked_add(start)?.checked_add(1)? + } + Bound::Unbounded => self.span.lo, + }; + let hi = match range.end_bound() { + Bound::Included(end) => { + let end = u32::try_from(*end).ok()?; + self.span.lo.checked_add(end)?.checked_add(1)? + } + Bound::Excluded(end) => { + let end = u32::try_from(*end).ok()?; + self.span.lo.checked_add(end)? + } + Bound::Unbounded => self.span.hi, + }; + if lo <= hi && hi <= self.span.hi { + Some(Span { lo, hi }) + } else { + None + } + } } } impl FromStr for Literal { type Err = LexError; - fn from_str(mut repr: &str) -> Result { - let negative = repr.starts_with('-'); + fn from_str(repr: &str) -> Result { + let mut cursor = get_cursor(repr); + #[cfg(span_locations)] + let lo = cursor.off; + + let negative = cursor.starts_with_char('-'); if negative { - repr = &repr[1..]; - if !repr.starts_with(|ch: char| ch.is_ascii_digit()) { + cursor = cursor.advance(1); + if !cursor.starts_with_fn(|ch| ch.is_ascii_digit()) { return Err(LexError::call_site()); } } - let cursor = get_cursor(repr); - if let Ok((_rest, mut literal)) = parse::literal(cursor) { - if literal.repr.len() == repr.len() { + + if let Ok((rest, mut literal)) = parse::literal(cursor) { + if rest.is_empty() { if negative { literal.repr.insert(0, '-'); } + literal.span = Span { + #[cfg(span_locations)] + lo, + #[cfg(span_locations)] + hi: rest.off, + }; return Ok(literal); } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/lib.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/lib.rs 2023-08-15 22:24:19.000000000 +0000 @@ -65,7 +65,7 @@ //! //! To opt into the additional APIs available in the most recent nightly //! 
compiler, the `procmacro2_semver_exempt` config flag must be passed to -//! rustc. We will polyfill those nightly-only APIs back to Rust 1.31.0. As +//! rustc. We will polyfill those nightly-only APIs back to Rust 1.56.0. As //! these are unstable APIs that track the nightly compiler, minor versions of //! proc-macro2 may make breaking changes to them at any time. //! @@ -86,11 +86,8 @@ //! a different thread. // Proc-macro2 types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.51")] -#![cfg_attr( - any(proc_macro_span, super_unstable), - feature(proc_macro_span, proc_macro_span_shrink) -)] +#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.66")] +#![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] #![cfg_attr(super_unstable, feature(proc_macro_def_site))] #![cfg_attr(doc_cfg, feature(doc_cfg))] #![allow( @@ -98,9 +95,12 @@ clippy::cast_possible_truncation, clippy::doc_markdown, clippy::items_after_statements, + clippy::let_underscore_untyped, clippy::manual_assert, + clippy::manual_range_contains, clippy::must_use_candidate, clippy::needless_doctest_main, + clippy::new_without_default, clippy::return_self_not_must_use, clippy::shadow_unrelated, clippy::trivially_copy_pass_by_ref, @@ -118,7 +118,9 @@ build script as well. 
"} -#[cfg(use_proc_macro)] +extern crate alloc; + +#[cfg(feature = "proc-macro")] extern crate proc_macro; mod marker; @@ -133,6 +135,8 @@ #[doc(hidden)] pub mod fallback; +pub mod extra; + #[cfg(not(wrap_proc_macro))] use crate::fallback as imp; #[path = "wrapper.rs"] @@ -142,11 +146,11 @@ #[cfg(span_locations)] mod location; +use crate::extra::DelimSpan; use crate::marker::Marker; use core::cmp::Ordering; use core::fmt::{self, Debug, Display}; use core::hash::{Hash, Hasher}; -use core::iter::FromIterator; use core::ops::RangeBounds; use core::str::FromStr; use std::error::Error; @@ -183,7 +187,7 @@ } } - fn _new_stable(inner: fallback::TokenStream) -> Self { + fn _new_fallback(inner: fallback::TokenStream) -> Self { TokenStream { inner: inner.into(), _marker: Marker, @@ -229,14 +233,16 @@ } } -#[cfg(use_proc_macro)] +#[cfg(feature = "proc-macro")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "proc-macro")))] impl From for TokenStream { fn from(inner: proc_macro::TokenStream) -> Self { TokenStream::_new(inner.into()) } } -#[cfg(use_proc_macro)] +#[cfg(feature = "proc-macro")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "proc-macro")))] impl From for proc_macro::TokenStream { fn from(inner: TokenStream) -> Self { inner.inner.into() @@ -377,7 +383,7 @@ } } - fn _new_stable(inner: fallback::Span) -> Self { + fn _new_fallback(inner: fallback::Span) -> Self { Span { inner: inner.into(), _marker: Marker, @@ -396,9 +402,6 @@ /// The span located at the invocation of the procedural macro, but with /// local variables, labels, and `$crate` resolved at the definition site /// of the macro. This is the same hygiene behavior as `macro_rules`. - /// - /// This function requires Rust 1.45 or later. - #[cfg(not(no_hygiene))] pub fn mixed_site() -> Self { Span::_new(imp::Span::mixed_site()) } @@ -485,24 +488,6 @@ self.inner.end() } - /// Creates an empty span pointing to directly before this span. - /// - /// This method is semver exempt and not exposed by default. 
- #[cfg(all(procmacro2_semver_exempt, any(not(wrap_proc_macro), super_unstable)))] - #[cfg_attr(doc_cfg, doc(cfg(procmacro2_semver_exempt)))] - pub fn before(&self) -> Span { - Span::_new(self.inner.before()) - } - - /// Creates an empty span pointing to directly after this span. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(all(procmacro2_semver_exempt, any(not(wrap_proc_macro), super_unstable)))] - #[cfg_attr(doc_cfg, doc(cfg(procmacro2_semver_exempt)))] - pub fn after(&self) -> Span { - Span::_new(self.inner.after()) - } - /// Create a new span encompassing `self` and `other`. /// /// Returns `None` if `self` and `other` are from different files. @@ -524,6 +509,17 @@ pub fn eq(&self, other: &Span) -> bool { self.inner.eq(&other.inner) } + + /// Returns the source text behind a span. This preserves the original + /// source code, including spaces and comments. It only returns a result if + /// the span corresponds to real source code. + /// + /// Note: The observable result of a macro should only rely on the tokens + /// and not on this source text. The result of this function is a best + /// effort to be used for diagnostics only. + pub fn source_text(&self) -> Option { + self.inner.source_text() + } } /// Prints a span in a form convenient for debugging. @@ -664,7 +660,7 @@ Group { inner } } - fn _new_stable(inner: fallback::Group) -> Self { + fn _new_fallback(inner: fallback::Group) -> Self { Group { inner: inner.into(), } @@ -681,7 +677,8 @@ } } - /// Returns the delimiter of this `Group` + /// Returns the punctuation used as the delimiter for this group: a set of + /// parentheses, square brackets, or curly braces. pub fn delimiter(&self) -> Delimiter { self.inner.delimiter() } @@ -725,6 +722,13 @@ Span::_new(self.inner.span_close()) } + /// Returns an object that holds this group's `span_open()` and + /// `span_close()` together (in a more compact representation than holding + /// those 2 spans individually). 
+ pub fn delim_span(&self) -> DelimSpan { + DelimSpan::new(&self.inner) + } + /// Configures the span for this `Group`'s delimiters, but not its internal /// tokens. /// @@ -1081,7 +1085,7 @@ } } - fn _new_stable(inner: fallback::Literal) -> Self { + fn _new_fallback(inner: fallback::Literal) -> Self { Literal { inner: inner.into(), _marker: Marker, diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/marker.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/marker.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/marker.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/marker.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,6 +1,6 @@ +use alloc::rc::Rc; use core::marker::PhantomData; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::rc::Rc; +use core::panic::{RefUnwindSafe, UnwindSafe}; // Zero sized marker with the correct set of autotrait impls we want all proc // macro types to have. 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/parse.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/parse.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/parse.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/parse.rs 2023-08-15 22:24:19.000000000 +0000 @@ -27,7 +27,18 @@ self.rest.starts_with(s) } - fn is_empty(&self) -> bool { + pub fn starts_with_char(&self, ch: char) -> bool { + self.rest.starts_with(ch) + } + + pub fn starts_with_fn(&self, f: Pattern) -> bool + where + Pattern: FnMut(char) -> bool, + { + self.rest.starts_with(f) + } + + pub fn is_empty(&self) -> bool { self.rest.is_empty() } @@ -97,7 +108,7 @@ s = s.advance(1); continue; } - b if b <= 0x7f => {} + b if b.is_ascii() => {} _ => { let ch = s.chars().next().unwrap(); if is_whitespace(ch) { @@ -217,13 +228,13 @@ hi: input.off, }); trees = outer; - trees.push_token_from_parser(TokenTree::Group(crate::Group::_new_stable(g))); + trees.push_token_from_parser(TokenTree::Group(crate::Group::_new_fallback(g))); } else { let (rest, mut tt) = match leaf_token(input) { Ok((rest, tt)) => (rest, tt), Err(Reject) => return Err(lex_error(input)), }; - tt.set_span(crate::Span::_new_stable(Span { + tt.set_span(crate::Span::_new_fallback(Span { #[cfg(span_locations)] lo, #[cfg(span_locations)] @@ -251,7 +262,7 @@ fn leaf_token(input: Cursor) -> PResult { if let Ok((input, l)) = literal(input) { // must be parsed before ident - Ok((input, TokenTree::Literal(crate::Literal::_new_stable(l)))) + Ok((input, TokenTree::Literal(crate::Literal::_new_fallback(l)))) } else if let Ok((input, p)) = punct(input) { Ok((input, TokenTree::Punct(p))) } else if let Ok((input, i)) = ident(input) { @@ -262,9 +273,11 @@ } fn ident(input: Cursor) -> PResult { - if ["r\"", "r#\"", "r##", "b\"", "b\'", "br\"", "br#"] - .iter() - .any(|prefix| input.starts_with(prefix)) + if [ + "r\"", "r#\"", "r##", "b\"", "b\'", 
"br\"", "br#", "c\"", "cr\"", "cr#", + ] + .iter() + .any(|prefix| input.starts_with(prefix)) { Err(Reject) } else { @@ -322,6 +335,8 @@ Ok(ok) } else if let Ok(ok) = byte_string(input) { Ok(ok) + } else if let Ok(ok) = c_string(input) { + Ok(ok) } else if let Ok(ok) = byte(input) { Ok(ok) } else if let Ok(ok) = character(input) { @@ -352,8 +367,8 @@ } } -fn cooked_string(input: Cursor) -> Result { - let mut chars = input.char_indices().peekable(); +fn cooked_string(mut input: Cursor) -> Result { + let mut chars = input.char_indices(); while let Some((i, ch)) = chars.next() { match ch { @@ -367,31 +382,16 @@ }, '\\' => match chars.next() { Some((_, 'x')) => { - if !backslash_x_char(&mut chars) { - break; - } + backslash_x_char(&mut chars)?; } - Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\')) - | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {} + Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"' | '0')) => {} Some((_, 'u')) => { - if !backslash_u(&mut chars) { - break; - } + backslash_u(&mut chars)?; } - Some((_, ch @ '\n')) | Some((_, ch @ '\r')) => { - let mut last = ch; - loop { - if last == '\r' && chars.next().map_or(true, |(_, ch)| ch != '\n') { - return Err(Reject); - } - match chars.peek() { - Some((_, ch)) if ch.is_whitespace() => { - last = *ch; - chars.next(); - } - _ => break, - } - } + Some((newline, ch @ ('\n' | '\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, ch as u8)?; + chars = input.char_indices(); } _ => break, }, @@ -401,11 +401,30 @@ Err(Reject) } +fn raw_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + _ => {} + } + } 
+ Err(Reject) +} + fn byte_string(input: Cursor) -> Result { if let Ok(input) = input.parse("b\"") { cooked_byte_string(input) } else if let Ok(input) = input.parse("br") { - raw_string(input) + raw_byte_string(input) } else { Err(Reject) } @@ -425,68 +444,125 @@ }, b'\\' => match bytes.next() { Some((_, b'x')) => { - if !backslash_x_byte(&mut bytes) { - break; - } + backslash_x_byte(&mut bytes)?; } - Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) - | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} - Some((newline, b @ b'\n')) | Some((newline, b @ b'\r')) => { - let mut last = b as char; - let rest = input.advance(newline + 1); - let mut chars = rest.char_indices(); - loop { - if last == '\r' && chars.next().map_or(true, |(_, ch)| ch != '\n') { - return Err(Reject); - } - match chars.next() { - Some((_, ch)) if ch.is_whitespace() => last = ch, - Some((offset, _)) => { - input = rest.advance(offset); - bytes = input.bytes().enumerate(); - break; - } - None => return Err(Reject), - } - } + Some((_, b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"')) => {} + Some((newline, b @ (b'\n' | b'\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, b)?; + bytes = input.bytes().enumerate(); } _ => break, }, - b if b < 0x80 => {} + b if b.is_ascii() => {} _ => break, } } Err(Reject) } -fn raw_string(input: Cursor) -> Result { - let mut chars = input.char_indices(); - let mut n = 0; - for (i, ch) in &mut chars { - match ch { - '"' => { - n = i; - break; +fn delimiter_of_raw_string(input: Cursor) -> PResult<&str> { + for (i, byte) in input.bytes().enumerate() { + match byte { + b'"' => { + if i > 255 { + // https://github.com/rust-lang/rust/pull/95251 + return Err(Reject); + } + return Ok((input.advance(i + 1), &input.rest[..i])); } - '#' => {} - _ => return Err(Reject), + b'#' => {} + _ => break, } } - if n > 255 { - // https://github.com/rust-lang/rust/pull/95251 - return Err(Reject); + Err(Reject) +} + +fn 
raw_byte_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + other => { + if !other.is_ascii() { + break; + } + } + } } + Err(Reject) +} + +fn c_string(input: Cursor) -> Result { + if let Ok(input) = input.parse("c\"") { + cooked_c_string(input) + } else if let Ok(input) = input.parse("cr") { + raw_c_string(input) + } else { + Err(Reject) + } +} + +fn raw_c_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + b'\0' => break, + _ => {} + } + } + Err(Reject) +} + +fn cooked_c_string(mut input: Cursor) -> Result { + let mut chars = input.char_indices(); + while let Some((i, ch)) = chars.next() { match ch { - '"' if input.rest[i + 1..].starts_with(&input.rest[..n]) => { - let rest = input.advance(i + 1 + n); - return Ok(literal_suffix(rest)); + '"' => { + let input = input.advance(i + 1); + return Ok(literal_suffix(input)); } '\r' => match chars.next() { Some((_, '\n')) => {} _ => break, }, - _ => {} + '\\' => match chars.next() { + Some((_, 'x')) => { + backslash_x_nonzero(&mut chars)?; + } + Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"')) => {} + Some((_, 'u')) => { + if backslash_u(&mut chars)? 
== '\0' { + break; + } + } + Some((newline, ch @ ('\n' | '\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, ch as u8)?; + chars = input.char_indices(); + } + _ => break, + }, + '\0' => break, + _ch => {} } } Err(Reject) @@ -497,9 +573,8 @@ let mut bytes = input.bytes().enumerate(); let ok = match bytes.next().map(|(_, b)| b) { Some(b'\\') => match bytes.next().map(|(_, b)| b) { - Some(b'x') => backslash_x_byte(&mut bytes), - Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'') - | Some(b'"') => true, + Some(b'x') => backslash_x_byte(&mut bytes).is_ok(), + Some(b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"') => true, _ => false, }, b => b.is_some(), @@ -520,11 +595,9 @@ let mut chars = input.char_indices(); let ok = match chars.next().map(|(_, ch)| ch) { Some('\\') => match chars.next().map(|(_, ch)| ch) { - Some('x') => backslash_x_char(&mut chars), - Some('u') => backslash_u(&mut chars), - Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => { - true - } + Some('x') => backslash_x_char(&mut chars).is_ok(), + Some('u') => backslash_u(&mut chars).is_ok(), + Some('n' | 'r' | 't' | '\\' | '0' | '\'' | '"') => true, _ => false, }, ch => ch.is_some(), @@ -538,36 +611,49 @@ } macro_rules! 
next_ch { - ($chars:ident @ $pat:pat $(| $rest:pat)*) => { + ($chars:ident @ $pat:pat) => { match $chars.next() { Some((_, ch)) => match ch { - $pat $(| $rest)* => ch, - _ => return false, + $pat => ch, + _ => return Err(Reject), }, - None => return false, + None => return Err(Reject), } }; } -fn backslash_x_char(chars: &mut I) -> bool +fn backslash_x_char(chars: &mut I) -> Result<(), Reject> where I: Iterator, { next_ch!(chars @ '0'..='7'); next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); - true + Ok(()) } -fn backslash_x_byte(chars: &mut I) -> bool +fn backslash_x_byte(chars: &mut I) -> Result<(), Reject> where I: Iterator, { next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); - true + Ok(()) } -fn backslash_u(chars: &mut I) -> bool +fn backslash_x_nonzero(chars: &mut I) -> Result<(), Reject> +where + I: Iterator, +{ + let first = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + let second = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + if first == '0' && second == '0' { + Err(Reject) + } else { + Ok(()) + } +} + +fn backslash_u(chars: &mut I) -> Result where I: Iterator, { @@ -580,17 +666,36 @@ 'a'..='f' => 10 + ch as u8 - b'a', 'A'..='F' => 10 + ch as u8 - b'A', '_' if len > 0 => continue, - '}' if len > 0 => return char::from_u32(value).is_some(), - _ => return false, + '}' if len > 0 => return char::from_u32(value).ok_or(Reject), + _ => break, }; if len == 6 { - return false; + break; } value *= 0x10; value += u32::from(digit); len += 1; } - false + Err(Reject) +} + +fn trailing_backslash(input: &mut Cursor, mut last: u8) -> Result<(), Reject> { + let mut whitespace = input.bytes().enumerate(); + loop { + if last == b'\r' && whitespace.next().map_or(true, |(_, b)| b != b'\n') { + return Err(Reject); + } + match whitespace.next() { + Some((_, b @ (b' ' | b'\t' | b'\n' | b'\r'))) => { + last = b; + } + Some((offset, _)) => { + *input = input.advance(offset); + return Ok(()); + 
} + None => return Err(Reject), + } + } } fn float(input: Cursor) -> Result { @@ -606,7 +711,7 @@ fn float_digits(input: Cursor) -> Result { let mut chars = input.chars().peekable(); match chars.next() { - Some(ch) if ch >= '0' && ch <= '9' => {} + Some(ch) if '0' <= ch && ch <= '9' => {} _ => return Err(Reject), } @@ -756,7 +861,7 @@ fn punct(input: Cursor) -> PResult { let (rest, ch) = punct_char(input)?; if ch == '\'' { - if ident_any(rest)?.0.starts_with("'") { + if ident_any(rest)?.0.starts_with_char('\'') { Err(Reject) } else { Ok((rest, Punct::new('\'', Spacing::Joint))) @@ -795,7 +900,7 @@ #[cfg(span_locations)] let lo = input.off; let (rest, (comment, inner)) = doc_comment_contents(input)?; - let span = crate::Span::_new_stable(Span { + let span = crate::Span::_new_fallback(Span { #[cfg(span_locations)] lo, #[cfg(span_locations)] @@ -831,7 +936,7 @@ bracketed.push_token_from_parser(TokenTree::Punct(equal)); bracketed.push_token_from_parser(TokenTree::Literal(literal)); let group = Group::new(Delimiter::Bracket, bracketed.build()); - let mut group = crate::Group::_new_stable(group); + let mut group = crate::Group::_new_fallback(group); group.set_span(span); trees.push_token_from_parser(TokenTree::Group(group)); @@ -848,7 +953,7 @@ Ok((input, (&s[3..s.len() - 2], true))) } else if input.starts_with("///") { let input = input.advance(3); - if input.starts_with("/") { + if input.starts_with_char('/') { return Err(Reject); } let (input, s) = take_until_newline_or_eof(input); diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/rcvec.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/rcvec.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/rcvec.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/rcvec.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,7 +1,8 @@ +use alloc::rc::Rc; +use alloc::vec; use core::mem; +use core::panic::RefUnwindSafe; use core::slice; 
-use std::rc::Rc; -use std::vec; pub(crate) struct RcVec { inner: Rc>, @@ -52,7 +53,7 @@ T: Clone, { let vec = if let Some(owned) = Rc::get_mut(&mut self.inner) { - mem::replace(owned, Vec::new()) + mem::take(owned) } else { Vec::clone(&self.inner) }; @@ -140,3 +141,5 @@ self.inner.size_hint() } } + +impl RefUnwindSafe for RcVec where T: RefUnwindSafe {} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/wrapper.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/wrapper.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/wrapper.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/src/wrapper.rs 2023-08-15 22:24:19.000000000 +0000 @@ -3,7 +3,6 @@ use crate::location::LineColumn; use crate::{fallback, Delimiter, Punct, Spacing, TokenTree}; use core::fmt::{self, Debug, Display}; -use core::iter::FromIterator; use core::ops::RangeBounds; use core::str::FromStr; use std::panic; @@ -40,7 +39,7 @@ } fn mismatch() -> ! 
{ - panic!("stable/nightly mismatch") + panic!("compiler/fallback mismatch") } impl DeferredTokenStream { @@ -286,15 +285,7 @@ impl Display for LexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - #[cfg(not(no_lexerror_display))] LexError::Compiler(e) => Display::fmt(e, f), - #[cfg(no_lexerror_display)] - LexError::Compiler(_e) => Display::fmt( - &fallback::LexError { - span: fallback::Span::call_site(), - }, - f, - ), LexError::Fallback(e) => Display::fmt(e, f), } } @@ -406,7 +397,6 @@ } } - #[cfg(not(no_hygiene))] pub fn mixed_site() -> Self { if inside_proc_macro() { Span::Compiler(proc_macro::Span::mixed_site()) @@ -426,13 +416,7 @@ pub fn resolved_at(&self, other: Span) -> Span { match (self, other) { - #[cfg(not(no_hygiene))] (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)), - - // Name resolution affects semantics, but location is only cosmetic - #[cfg(no_hygiene)] - (Span::Compiler(_), Span::Compiler(_)) => other, - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)), _ => mismatch(), } @@ -440,13 +424,7 @@ pub fn located_at(&self, other: Span) -> Span { match (self, other) { - #[cfg(not(no_hygiene))] (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)), - - // Name resolution affects semantics, but location is only cosmetic - #[cfg(no_hygiene)] - (Span::Compiler(_), Span::Compiler(_)) => *self, - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)), _ => mismatch(), } @@ -470,12 +448,6 @@ #[cfg(span_locations)] pub fn start(&self) -> LineColumn { match self { - #[cfg(proc_macro_span)] - Span::Compiler(s) => { - let proc_macro::LineColumn { line, column } = s.start(); - LineColumn { line, column } - } - #[cfg(not(proc_macro_span))] Span::Compiler(_) => LineColumn { line: 0, column: 0 }, Span::Fallback(s) => s.start(), } @@ -484,33 +456,11 @@ #[cfg(span_locations)] pub fn end(&self) -> LineColumn { match self { - #[cfg(proc_macro_span)] - 
Span::Compiler(s) => { - let proc_macro::LineColumn { line, column } = s.end(); - LineColumn { line, column } - } - #[cfg(not(proc_macro_span))] Span::Compiler(_) => LineColumn { line: 0, column: 0 }, Span::Fallback(s) => s.end(), } } - #[cfg(super_unstable)] - pub fn before(&self) -> Span { - match self { - Span::Compiler(s) => Span::Compiler(s.before()), - Span::Fallback(s) => Span::Fallback(s.before()), - } - } - - #[cfg(super_unstable)] - pub fn after(&self) -> Span { - match self { - Span::Compiler(s) => Span::Compiler(s.after()), - Span::Fallback(s) => Span::Fallback(s.after()), - } - } - pub fn join(&self, other: Span) -> Option { let ret = match (self, other) { #[cfg(proc_macro_span)] @@ -530,6 +480,16 @@ } } + pub fn source_text(&self) -> Option { + match self { + #[cfg(not(no_source_text))] + Span::Compiler(s) => s.source_text(), + #[cfg(no_source_text)] + Span::Compiler(_) => None, + Span::Fallback(s) => s.source_text(), + } + } + fn unwrap_nightly(self) -> proc_macro::Span { match self { Span::Compiler(s) => s, @@ -620,20 +580,14 @@ pub fn span_open(&self) -> Span { match self { - #[cfg(not(no_group_open_close))] Group::Compiler(g) => Span::Compiler(g.span_open()), - #[cfg(no_group_open_close)] - Group::Compiler(g) => Span::Compiler(g.span()), Group::Fallback(g) => Span::Fallback(g.span_open()), } } pub fn span_close(&self) -> Span { match self { - #[cfg(not(no_group_open_close))] Group::Compiler(g) => Span::Compiler(g.span_close()), - #[cfg(no_group_open_close)] - Group::Compiler(g) => Span::Compiler(g.span()), Group::Fallback(g) => Span::Fallback(g.span_close()), } } @@ -694,27 +648,7 @@ pub fn new_raw(string: &str, span: Span) -> Self { match span { - #[cfg(not(no_ident_new_raw))] Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new_raw(string, s)), - #[cfg(no_ident_new_raw)] - Span::Compiler(s) => { - let _ = proc_macro::Ident::new(string, s); - // At this point the un-r#-prefixed string is known to be a - // valid identifier. 
Try to produce a valid raw identifier by - // running the `TokenStream` parser, and unwrapping the first - // token as an `Ident`. - let raw_prefixed = format!("r#{}", string); - if let Ok(ts) = raw_prefixed.parse::() { - let mut iter = ts.into_iter(); - if let (Some(proc_macro::TokenTree::Ident(mut id)), None) = - (iter.next(), iter.next()) - { - id.set_span(s); - return Ident::Compiler(id); - } - } - panic!("not allowed as a raw identifier: `{}`", raw_prefixed) - } Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw(string, s)), } } @@ -816,7 +750,7 @@ impl Literal { pub unsafe fn from_str_unchecked(repr: &str) -> Self { if inside_proc_macro() { - Literal::Compiler(compiler_literal_from_str(repr).expect("invalid literal")) + Literal::Compiler(proc_macro::Literal::from_str(repr).expect("invalid literal")) } else { Literal::Fallback(fallback::Literal::from_str_unchecked(repr)) } @@ -939,7 +873,8 @@ fn from_str(repr: &str) -> Result { if inside_proc_macro() { - compiler_literal_from_str(repr).map(Literal::Compiler) + let literal = proc_macro::Literal::from_str(repr)?; + Ok(Literal::Compiler(literal)) } else { let literal = fallback::Literal::from_str(repr)?; Ok(Literal::Fallback(literal)) @@ -947,24 +882,6 @@ } } -fn compiler_literal_from_str(repr: &str) -> Result { - #[cfg(not(no_literal_from_str))] - { - proc_macro::Literal::from_str(repr).map_err(LexError::Compiler) - } - #[cfg(no_literal_from_str)] - { - let tokens = proc_macro_parse(repr)?; - let mut iter = tokens.into_iter(); - if let (Some(proc_macro::TokenTree::Literal(literal)), None) = (iter.next(), iter.next()) { - if literal.to_string().len() == repr.len() { - return Ok(literal); - } - } - Err(LexError::call_site()) - } -} - impl Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/marker.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/marker.rs --- 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/marker.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/marker.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,3 +1,5 @@ +#![allow(clippy::extra_unused_type_parameters)] + use proc_macro2::{ Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, }; @@ -60,7 +62,6 @@ assert_impl!(SourceFile is not Send or Sync); } -#[cfg(not(no_libprocmacro_unwind_safe))] mod unwind_safe { use proc_macro2::{ Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_fmt.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_fmt.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_fmt.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_fmt.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,7 +1,7 @@ #![allow(clippy::from_iter_instead_of_collect)] use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; -use std::iter::{self, FromIterator}; +use std::iter; #[test] fn test_fmt_group() { diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,12 +1,12 @@ #![allow( clippy::assertions_on_result_states, clippy::items_after_statements, - clippy::non_ascii_literal + clippy::non_ascii_literal, + clippy::octal_escapes )] use proc_macro2::{Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; use std::iter; -use std::panic; use std::str::{self, FromStr}; #[test] @@ -89,24 +89,9 @@ } 
#[test] +#[should_panic(expected = r#""'a#" is not a valid Ident"#)] fn lifetime_invalid() { - let result = panic::catch_unwind(|| Ident::new("'a#", Span::call_site())); - match result { - Err(box_any) => { - let message = box_any.downcast_ref::().unwrap(); - let expected1 = r#""\'a#" is not a valid Ident"#; // 1.31.0 .. 1.53.0 - let expected2 = r#""'a#" is not a valid Ident"#; // 1.53.0 .. - assert!( - message == expected1 || message == expected2, - "panic message does not match expected string\n\ - \x20 panic message: `{:?}`\n\ - \x20expected message: `{:?}`", - message, - expected2, - ); - } - Ok(_) => panic!("test did not panic as expected"), - } + Ident::new("'a#", Span::call_site()); } #[test] @@ -114,6 +99,13 @@ assert_eq!(Literal::string("foo").to_string(), "\"foo\""); assert_eq!(Literal::string("\"").to_string(), "\"\\\"\""); assert_eq!(Literal::string("didn't").to_string(), "\"didn't\""); + assert_eq!( + Literal::string("a\00b\07c\08d\0e\0").to_string(), + "\"a\\x000b\\x007c\\08d\\0e\\0\"", + ); + + "\"\\\r\n x\"".parse::().unwrap(); + "\"\\\r\n \rx\"".parse::().unwrap_err(); } #[test] @@ -147,6 +139,51 @@ Literal::byte_string(b"\0\t\n\r\"\\2\x10").to_string(), "b\"\\0\\t\\n\\r\\\"\\\\2\\x10\"", ); + assert_eq!( + Literal::byte_string(b"a\00b\07c\08d\0e\0").to_string(), + "b\"a\\x000b\\x007c\\08d\\0e\\0\"", + ); + + "b\"\\\r\n x\"".parse::().unwrap(); + "b\"\\\r\n \rx\"".parse::().unwrap_err(); + "b\"\\\r\n \u{a0}x\"".parse::().unwrap_err(); + "br\"\u{a0}\"".parse::().unwrap_err(); +} + +#[test] +fn literal_c_string() { + let strings = r###" + c"hello\x80我叫\u{1F980}" // from the RFC + cr"\" + cr##"Hello "world"!"## + c"\t\n\r\"\\" + "###; + + let mut tokens = strings.parse::().unwrap().into_iter(); + + for expected in &[ + r#"c"hello\x80我叫\u{1F980}""#, + r#"cr"\""#, + r###"cr##"Hello "world"!"##"###, + r#"c"\t\n\r\"\\""#, + ] { + match tokens.next().unwrap() { + TokenTree::Literal(literal) => { + assert_eq!(literal.to_string(), *expected); + } + 
unexpected => panic!("unexpected token: {:?}", unexpected), + } + } + + if let Some(unexpected) = tokens.next() { + panic!("unexpected token: {:?}", unexpected); + } + + for invalid in &[r#"c"\0""#, r#"c"\x00""#, r#"c"\u{0}""#, "c\"\0\""] { + if let Ok(unexpected) = invalid.parse::() { + panic!("unexpected token: {:?}", unexpected); + } + } } #[test] @@ -265,6 +302,30 @@ } #[test] +fn literal_span() { + let positive = "0.1".parse::().unwrap(); + let negative = "-0.1".parse::().unwrap(); + let subspan = positive.subspan(1..2); + + #[cfg(not(span_locations))] + { + let _ = negative; + assert!(subspan.is_none()); + } + + #[cfg(span_locations)] + { + assert_eq!(positive.span().start().column, 0); + assert_eq!(positive.span().end().column, 3); + assert_eq!(negative.span().start().column, 0); + assert_eq!(negative.span().end().column, 4); + assert_eq!(subspan.unwrap().source_text().unwrap(), "."); + } + + assert!(positive.subspan(1..4).is_none()); +} + +#[test] fn roundtrip() { fn roundtrip(p: &str) { println!("parse: {}", p); @@ -603,8 +664,8 @@ check_spans("/*** ábc */ x", &[(1, 12, 1, 13)]); check_spans(r#""abc""#, &[(1, 0, 1, 5)]); check_spans(r#""ábc""#, &[(1, 0, 1, 5)]); - check_spans(r###"r#"abc"#"###, &[(1, 0, 1, 8)]); - check_spans(r###"r#"ábc"#"###, &[(1, 0, 1, 8)]); + check_spans(r##"r#"abc"#"##, &[(1, 0, 1, 8)]); + check_spans(r##"r#"ábc"#"##, &[(1, 0, 1, 8)]); check_spans("r#\"a\nc\"#", &[(1, 0, 2, 3)]); check_spans("r#\"á\nc\"#", &[(1, 0, 2, 3)]); check_spans("'a'", &[(1, 0, 1, 3)]); @@ -624,7 +685,6 @@ check_spans("ábc// foo", &[(1, 0, 1, 3)]); check_spans("ábć// foo", &[(1, 0, 1, 3)]); check_spans("b\"a\\\n c\"", &[(1, 0, 2, 3)]); - check_spans("b\"a\\\n\u{00a0}c\"", &[(1, 0, 2, 3)]); } #[cfg(span_locations)] @@ -656,6 +716,18 @@ } #[test] +fn whitespace() { + // space, horizontal tab, vertical tab, form feed, carriage return, line + // feed, non-breaking space, left-to-right mark, right-to-left mark + let various_spaces = " 
\t\u{b}\u{c}\r\n\u{a0}\u{200e}\u{200f}"; + let tokens = various_spaces.parse::().unwrap(); + assert_eq!(tokens.into_iter().count(), 0); + + let lone_carriage_returns = " \r \r\r\n "; + lone_carriage_returns.parse::().unwrap(); +} + +#[test] fn byte_order_mark() { let string = "\u{feff}foo"; let tokens = string.parse::().unwrap(); diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_size.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_size.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_size.rs 1970-01-01 00:00:00.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/proc-macro2/tests/test_size.rs 2023-08-15 22:24:19.000000000 +0000 @@ -0,0 +1,42 @@ +extern crate proc_macro; + +use std::mem; + +#[rustversion::attr(before(1.32), ignore)] +#[test] +fn test_proc_macro_span_size() { + assert_eq!(mem::size_of::(), 4); + assert_eq!(mem::size_of::>(), 4); +} + +#[cfg_attr(not(all(not(wrap_proc_macro), not(span_locations))), ignore)] +#[test] +fn test_proc_macro2_fallback_span_size_without_locations() { + assert_eq!(mem::size_of::(), 0); + assert_eq!(mem::size_of::>(), 1); +} + +#[cfg_attr(not(all(not(wrap_proc_macro), span_locations)), ignore)] +#[test] +fn test_proc_macro2_fallback_span_size_with_locations() { + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::size_of::>(), 12); +} + +#[rustversion::attr(before(1.32), ignore)] +#[rustversion::attr( + since(1.32), + cfg_attr(not(all(wrap_proc_macro, not(span_locations))), ignore) +)] +#[test] +fn test_proc_macro2_wrapper_span_size_without_locations() { + assert_eq!(mem::size_of::(), 4); + assert_eq!(mem::size_of::>(), 8); +} + +#[cfg_attr(not(all(wrap_proc_macro, span_locations)), ignore)] +#[test] +fn test_proc_macro2_wrapper_span_size_with_locations() { + assert_eq!(mem::size_of::(), 12); + assert_eq!(mem::size_of::>(), 12); +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/.cargo-checksum.json 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/.cargo-checksum.json 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/.cargo-checksum.json 2023-08-15 22:24:19.000000000 +0000 @@ -1 +1 @@ -{"files":{"Cargo.toml":"b31678b5e9696b0320493f7120e873490183308fc5afb052dc23a265048b8e16","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"66f3cf08338e47618fd23d810355b075da573815d9c1e158a7f7ab140decc16d","build.rs":"3733c86ae2733629f873f93c2f45da30164beee8de9ee0833099fac6a05a3e6b","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/ext.rs":"9881576cac3e476a4bf04f9b601cf9a53b79399fb0ca9634e8b861ac91709843","src/format.rs":"c595015418f35e6992e710441b9999f09b2afe4678b138039d670d100c0bdd86","src/ident_fragment.rs":"66788c5f57681547d936a9bcf51873b658630c76b2e690df4b3158edf573384a","src/lib.rs":"5f0dac39c736d01c698745909c93efb7e701aed4493c488a32239a7efd7d4469","src/runtime.rs":"79bbb2fe5b18bc3ec9f8f8143bd120b45680a3027c89f37b0a6a6b97bdaadb21","src/spanned.rs":"43ff919f1d2d27dff6b2db409539b1c697e913eb8c3131cf5de45a845752b7b5","src/to_tokens.rs":"99bb6f467289c32af6c1f7af0d45cc6ac7b31e2436774e616770152a49e6ac0f","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/test.rs":"c4967a33fcf7c2effd1979bcb4c03ae797359eeab92c627ab4b609cd8678ff78","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated-dup.stderr":"be67a6c99eed689aa08b46afd0ab3ed4e71fde42e5efed41ab05741710f42fe5","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-interpolated.stderr":"ed05bc229abf5a267ea3d
194336a3a845a061bd10c1be7020b9351f81e737946","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter-separated.stderr":"873f4db0ec63606d64d46790f3ee24bdb4dd04379b8e57dc5ac1114cc3775fb3","tests/ui/does-not-have-iter.rs":"09dc9499d861b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/does-not-have-iter.stderr":"0e3de2635a79cce9226113fa8cb6bdbdc0ffcd487d7537d4dd0dc8222adf4a8a","tests/ui/not-quotable.rs":"d630ed8e5fe16f125015999d068569cc3fe5dc1033a56e622690ec2c080c13f4","tests/ui/not-quotable.stderr":"4b81ec7bb82ba197ede6d47c1b6e5cacc0999cc8c9e2fa77a46db5e29397153c","tests/ui/not-repeatable.rs":"dbfedcad67b57543aa7d6684b6549db90fbdb74ffebcae42323d31eb88e59c87","tests/ui/not-repeatable.stderr":"a578a6293fef33c54f8e8114bf72a933a1315b45e866e4bcef1e31ce2ce55dcd","tests/ui/wrong-type-span.rs":"6195e35ea844c0c52ba1cff5d790c3a371af6915d137d377834ad984229ef9ea","tests/ui/wrong-type-span.stderr":"c986de5cb858272636c9e36ae5f57e5ee13589d4f1a73a050b21824010314f8d"},"package":"8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"} \ No newline at end of file 
+{"files":{"Cargo.toml":"5969ab348602ad0e3909923146807be5bd7d650692c09e8028fcdfa0886d4a71","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"66f3cf08338e47618fd23d810355b075da573815d9c1e158a7f7ab140decc16d","build.rs":"3733c86ae2733629f873f93c2f45da30164beee8de9ee0833099fac6a05a3e6b","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/ext.rs":"9881576cac3e476a4bf04f9b601cf9a53b79399fb0ca9634e8b861ac91709843","src/format.rs":"c595015418f35e6992e710441b9999f09b2afe4678b138039d670d100c0bdd86","src/ident_fragment.rs":"66788c5f57681547d936a9bcf51873b658630c76b2e690df4b3158edf573384a","src/lib.rs":"7938cba6edf3be1f8c2c4432d3c6c171104375aec4b9c213437c89656fd8d3f2","src/runtime.rs":"31b2159986c68dc1c78801a92f795435dbc0bcea859ca342df280889e82c6c4d","src/spanned.rs":"0ccaae1137af5f3e54eae75c3bdc637be74cfa56a857f2c0f85a041c9ba26838","src/to_tokens.rs":"99bb6f467289c32af6c1f7af0d45cc6ac7b31e2436774e616770152a49e6ac0f","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/test.rs":"3be80741f84a707376c230d9cf70ce9537caa359691d8d4c34968e28175e4ad7","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated-dup.stderr":"09406a4bcf96587a739df7053251c8e07ea520f8d20b13f8fbea33f9c29e019b","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-interpolated.stderr":"626170deaca60092f1992262afe7598e03ef8e3821ebe91d0a643edd8346e9ac","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter-separated.stderr":"03fd560979ebcd5aa6f83858bc2c3c01ba6546c16335101275505304895c1ae9","tests/ui/does-not-have-iter.rs":"09dc9499d86
1b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/does-not-have-iter.stderr":"d6da483c29e232ced72059bbdf05d31afb1df9e02954edaa9cfaea1ec6df72dc","tests/ui/not-quotable.rs":"5759d0884943417609f28faadc70254a3e2fd3d9bd6ff7297a3fb70a77fafd8a","tests/ui/not-quotable.stderr":"efcace9419fdf64d6beca7e135c3b7daff74038d4449475896cbe8cbf2566ade","tests/ui/not-repeatable.rs":"a4b115c04e4e41049a05f5b69450503fbffeba031218b4189cb931839f7f9a9c","tests/ui/not-repeatable.stderr":"594249d59d16f039c16816f1aaf9933176994e296fcf81d1b8b24d5b66ae0d0a","tests/ui/wrong-type-span.rs":"6195e35ea844c0c52ba1cff5d790c3a371af6915d137d377834ad984229ef9ea","tests/ui/wrong-type-span.stderr":"cad072e40e0ecc04f375122ae41aede2f0da2a9244492b3fcf70249e59d1b128"},"package":"4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/Cargo.toml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/Cargo.toml 2023-08-15 22:24:19.000000000 +0000 @@ -13,7 +13,7 @@ edition = "2018" rust-version = "1.31" name = "quote" -version = "1.0.23" +version = "1.0.26" authors = ["David Tolnay "] autobenches = false description = "Quasi-quoting macro quote!(...)" @@ -34,7 +34,7 @@ doc-scrape-examples = false [dependencies.proc-macro2] -version = "1.0.40" +version = "1.0.52" default-features = false [dev-dependencies.rustversion] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/LICENSE-APACHE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/LICENSE-APACHE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/LICENSE-APACHE 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/LICENSE-APACHE 2023-08-15 22:24:19.000000000 +0000 @@ -174,28 +174,3 @@ of your accepting any such warranty or 
additional liability. END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/lib.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/lib.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/lib.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/lib.rs 2023-08-15 22:24:19.000000000 +0000 @@ -81,7 +81,7 @@ //! ``` // Quote types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/quote/1.0.23")] +#![doc(html_root_url = "https://docs.rs/quote/1.0.26")] #![allow( clippy::doc_markdown, clippy::missing_errors_doc, @@ -619,14 +619,14 @@ #[macro_export] macro_rules! 
quote_spanned { ($span:expr=>) => {{ - let _: $crate::__private::Span = $span; + let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); $crate::__private::TokenStream::new() }}; // Special case rule for a single tt, for performance. ($span:expr=> $tt:tt) => {{ let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $span; + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); $crate::quote_token_spanned!{$tt _s _span} _s }}; @@ -634,13 +634,13 @@ // Special case rules for two tts, for performance. ($span:expr=> # $var:ident) => {{ let mut _s = $crate::__private::TokenStream::new(); - let _: $crate::__private::Span = $span; + let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); $crate::ToTokens::to_tokens(&$var, &mut _s); _s }}; ($span:expr=> $tt1:tt $tt2:tt) => {{ let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $span; + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); $crate::quote_token_spanned!{$tt1 _s _span} $crate::quote_token_spanned!{$tt2 _s _span} _s @@ -649,7 +649,7 @@ // Rule for any other number of tokens. 
($span:expr=> $($tt:tt)*) => {{ let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $span; + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); $crate::quote_each_token_spanned!{_s _span $($tt)*} _s }}; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/runtime.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/runtime.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/runtime.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/runtime.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,10 +1,12 @@ +use self::get_span::{GetSpan, GetSpanBase, GetSpanInner}; use crate::{IdentFragment, ToTokens, TokenStreamExt}; use core::fmt; use core::iter; use core::ops::BitOr; +use proc_macro2::{Group, Ident, Punct, Spacing, TokenTree}; pub use core::option::Option; -pub use proc_macro2::*; +pub use proc_macro2::{Delimiter, Span, TokenStream}; pub use std::format; pub struct HasIterator; // True @@ -164,6 +166,62 @@ } } +#[inline] +pub fn get_span(span: T) -> GetSpan { + GetSpan(GetSpanInner(GetSpanBase(span))) +} + +mod get_span { + use core::ops::Deref; + use proc_macro2::extra::DelimSpan; + use proc_macro2::Span; + + pub struct GetSpan(pub(crate) GetSpanInner); + + pub struct GetSpanInner(pub(crate) GetSpanBase); + + pub struct GetSpanBase(pub(crate) T); + + impl GetSpan { + #[inline] + pub fn __into_span(self) -> Span { + ((self.0).0).0 + } + } + + impl GetSpanInner { + #[inline] + pub fn __into_span(&self) -> Span { + (self.0).0.join() + } + } + + impl GetSpanBase { + #[allow(clippy::unused_self)] + pub fn __into_span(&self) -> T { + unreachable!() + } + } + + impl Deref for GetSpan { + type Target = GetSpanInner; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl Deref for GetSpanInner { + type Target = GetSpanBase; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } +} + pub fn 
push_group(tokens: &mut TokenStream, delimiter: Delimiter, inner: TokenStream) { tokens.append(Group::new(delimiter, inner)); } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/spanned.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/spanned.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/src/spanned.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/src/spanned.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,7 +1,9 @@ use crate::ToTokens; +use proc_macro2::extra::DelimSpan; use proc_macro2::{Span, TokenStream}; -pub trait Spanned { +// Not public API other than via the syn crate. Use syn::spanned::Spanned. +pub trait Spanned: private::Sealed { fn __span(&self) -> Span; } @@ -11,6 +13,12 @@ } } +impl Spanned for DelimSpan { + fn __span(&self) -> Span { + self.join() + } +} + impl Spanned for T { fn __span(&self) -> Span { join_spans(self.into_token_stream()) @@ -41,3 +49,14 @@ .and_then(|last| first.join(last)) .unwrap_or(first) } + +mod private { + use crate::ToTokens; + use proc_macro2::extra::DelimSpan; + use proc_macro2::Span; + + pub trait Sealed {} + impl Sealed for Span {} + impl Sealed for DelimSpan {} + impl Sealed for T {} +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/test.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/test.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/test.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/test.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,14 +1,17 @@ #![allow( clippy::disallowed_names, + clippy::let_underscore_untyped, clippy::shadow_unrelated, clippy::unseparated_literal_suffix, clippy::used_underscore_binding )] +extern crate proc_macro; + use std::borrow::Cow; use std::collections::BTreeSet; -use proc_macro2::{Ident, Span, TokenStream}; +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream}; use quote::{format_ident, 
quote, quote_spanned, TokenStreamExt}; struct X; @@ -517,3 +520,30 @@ let id = quote!(r#raw_id); assert_eq!(id.to_string(), "r#raw_id"); } + +#[test] +fn test_type_inference_for_span() { + trait CallSite { + fn get() -> Self; + } + + impl CallSite for Span { + fn get() -> Self { + Span::call_site() + } + } + + let span = Span::call_site(); + let _ = quote_spanned!(span=> ...); + + let delim_span = Group::new(Delimiter::Parenthesis, TokenStream::new()).delim_span(); + let _ = quote_spanned!(delim_span=> ...); + + let inferred = CallSite::get(); + let _ = quote_spanned!(inferred=> ...); + + if false { + let proc_macro_span = proc_macro::Span::call_site(); + let _ = quote_spanned!(proc_macro_span.into()=> ...); + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -4,7 +4,7 @@ 8 | quote!(#(#nonrep #nonrep)*); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ | | - | expected struct `HasIterator`, found struct `ThereIsNoIteratorInRepetition` + | expected `HasIterator`, found `ThereIsNoIteratorInRepetition` | expected due to this | = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr 
2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -4,7 +4,7 @@ 8 | quote!(#(#nonrep)*); | ^^^^^^^^^^^^^^^^^^^ | | - | expected struct `HasIterator`, found struct `ThereIsNoIteratorInRepetition` + | expected `HasIterator`, found `ThereIsNoIteratorInRepetition` | expected due to this | = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-separated.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-separated.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-separated.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter-separated.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -4,7 +4,7 @@ 4 | quote!(#(a b),*); | ^^^^^^^^^^^^^^^^ | | - | expected struct `HasIterator`, found struct `ThereIsNoIteratorInRepetition` + | expected `HasIterator`, found `ThereIsNoIteratorInRepetition` | expected due to this | = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/does-not-have-iter.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -4,7 +4,7 @@ 4 | quote!(#(a b)*); | 
^^^^^^^^^^^^^^^ | | - | expected struct `HasIterator`, found struct `ThereIsNoIteratorInRepetition` + | expected `HasIterator`, found `ThereIsNoIteratorInRepetition` | expected due to this | = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.rs 2023-08-15 22:24:19.000000000 +0000 @@ -3,5 +3,5 @@ fn main() { let ip = Ipv4Addr::LOCALHOST; - _ = quote! { #ip }; + let _ = quote! { #ip }; } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-quotable.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -1,11 +1,11 @@ error[E0277]: the trait bound `Ipv4Addr: ToTokens` is not satisfied - --> tests/ui/not-quotable.rs:6:9 + --> tests/ui/not-quotable.rs:6:13 | -6 | _ = quote! { #ip }; - | ^^^^^^^^^^^^^^ - | | - | the trait `ToTokens` is not implemented for `Ipv4Addr` - | required by a bound introduced by this call +6 | let _ = quote! 
{ #ip }; + | ^^^^^^^^^^^^^^ + | | + | the trait `ToTokens` is not implemented for `Ipv4Addr` + | required by a bound introduced by this call | = help: the following other types implement trait `ToTokens`: &'a T diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.rs 2023-08-15 22:24:19.000000000 +0000 @@ -4,5 +4,5 @@ fn main() { let ip = Ipv4Addr; - _ = quote! { #(#ip)* }; + let _ = quote! { #(#ip)* }; } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/not-repeatable.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -1,5 +1,5 @@ error[E0599]: the method `quote_into_iter` exists for struct `Ipv4Addr`, but its trait bounds were not satisfied - --> tests/ui/not-repeatable.rs:7:9 + --> tests/ui/not-repeatable.rs:7:13 | 3 | struct Ipv4Addr; | --------------- @@ -10,8 +10,8 @@ | doesn't satisfy `Ipv4Addr: ext::RepIteratorExt` | doesn't satisfy `Ipv4Addr: ext::RepToTokensExt` ... -7 | _ = quote! { #(#ip)* }; - | ^^^^^^^^^^^^^^^^^^ method cannot be called on `Ipv4Addr` due to unsatisfied trait bounds +7 | let _ = quote! 
{ #(#ip)* }; + | ^^^^^^^^^^^^^^^^^^ method cannot be called on `Ipv4Addr` due to unsatisfied trait bounds | = note: the following trait bounds were not satisfied: `Ipv4Addr: Iterator` diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/wrong-type-span.stderr clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/wrong-type-span.stderr --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/wrong-type-span.stderr 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/quote/tests/ui/wrong-type-span.stderr 2023-08-15 22:24:19.000000000 +0000 @@ -1,8 +1,10 @@ error[E0308]: mismatched types - --> tests/ui/wrong-type-span.rs:6:20 + --> tests/ui/wrong-type-span.rs:6:5 | 6 | quote_spanned!(span=> #x); - | ---------------^^^^------ - | | | - | | expected struct `Span`, found `&str` + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | expected `Span`, found `&str` | expected due to this + | + = note: this error originates in the macro `quote_spanned` (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/benches/file.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/benches/file.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/benches/file.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/benches/file.rs 2023-08-15 22:24:19.000000000 +0000 @@ -4,8 +4,11 @@ #![recursion_limit = "1024"] #![allow( clippy::items_after_statements, + clippy::manual_let_else, + clippy::match_like_matches_macro, clippy::missing_panics_doc, - clippy::must_use_candidate + clippy::must_use_candidate, + clippy::uninlined_format_args )] extern crate test; @@ -14,10 +17,9 @@ #[path = "../tests/macros/mod.rs"] mod macros; -#[path = "../tests/common/mod.rs"] -mod common; +#[allow(dead_code)] #[path = "../tests/repo/mod.rs"] -pub mod repo; +mod repo; use proc_macro2::{Span, TokenStream}; use std::fs; diff -Nru 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/benches/rust.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/benches/rust.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/benches/rust.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/benches/rust.rs 2023-08-15 22:24:19.000000000 +0000 @@ -5,14 +5,20 @@ #![cfg_attr(not(syn_only), feature(rustc_private))] #![recursion_limit = "1024"] -#![allow(clippy::cast_lossless, clippy::unnecessary_wraps)] +#![allow( + clippy::cast_lossless, + clippy::let_underscore_untyped, + clippy::manual_let_else, + clippy::match_like_matches_macro, + clippy::uninlined_format_args, + clippy::unnecessary_wraps +)] #[macro_use] #[path = "../tests/macros/mod.rs"] mod macros; -#[path = "../tests/common/mod.rs"] -mod common; +#[allow(dead_code)] #[path = "../tests/repo/mod.rs"] mod repo; @@ -38,6 +44,7 @@ #[cfg(not(syn_only))] mod librustc_parse { extern crate rustc_data_structures; + extern crate rustc_driver; extern crate rustc_error_messages; extern crate rustc_errors; extern crate rustc_parse; @@ -91,7 +98,7 @@ #[cfg(not(syn_only))] mod read_from_disk { pub fn bench(content: &str) -> Result<(), ()> { - _ = content; + let _ = content; Ok(()) } } @@ -101,9 +108,13 @@ let mut success = 0; let mut total = 0; - walkdir::WalkDir::new("tests/rust/src") - .into_iter() - .filter_entry(repo::base_dir_filter) + ["tests/rust/compiler", "tests/rust/library"] + .iter() + .flat_map(|dir| { + walkdir::WalkDir::new(dir) + .into_iter() + .filter_entry(repo::base_dir_filter) + }) .for_each(|entry| { let entry = entry.unwrap(); let path = entry.path(); diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/build.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/build.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/build.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/build.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -use 
std::env; -use std::process::Command; -use std::str; - -// The rustc-cfg strings below are *not* public API. Please let us know by -// opening a GitHub issue if your build environment requires some way to enable -// these cfgs other than by executing our build script. -fn main() { - let compiler = match rustc_version() { - Some(compiler) => compiler, - None => return, - }; - - if compiler.minor < 36 { - println!("cargo:rustc-cfg=syn_omit_await_from_token_macro"); - } - - if compiler.minor < 39 { - println!("cargo:rustc-cfg=syn_no_const_vec_new"); - } - - if compiler.minor < 40 { - println!("cargo:rustc-cfg=syn_no_non_exhaustive"); - } - - if compiler.minor < 56 { - println!("cargo:rustc-cfg=syn_no_negative_literal_parse"); - } - - if !compiler.nightly { - println!("cargo:rustc-cfg=syn_disable_nightly_tests"); - } -} - -struct Compiler { - minor: u32, - nightly: bool, -} - -fn rustc_version() -> Option { - let rustc = env::var_os("RUSTC")?; - let output = Command::new(rustc).arg("--version").output().ok()?; - let version = str::from_utf8(&output.stdout).ok()?; - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - let minor = pieces.next()?.parse().ok()?; - let nightly = version.contains("nightly") || version.ends_with("-dev"); - Some(Compiler { minor, nightly }) -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/.cargo-checksum.json clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/.cargo-checksum.json --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/.cargo-checksum.json 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/.cargo-checksum.json 2023-08-15 22:24:19.000000000 +0000 @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"1ff565970239963d56cbfdd20476fd265fcf2d050fc5ed92a298686321985230","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"ea9f2b6340b302b5608d2bedcda7e2d707f3eaebf4cc983c02d55071ead7096f","benches/file.rs":"3d737ef3878f6e242b003af9bd539e565f98439a12ee44d9548d84e3fdd7af0c","benches/rust.rs":"11ac9fe898a7bf1bd63e8a8cc9c08bd795b01f0248215cff99afaaf28ce87fab","build.rs":"b815649fd2929d3debd93a58f5da2fb8eba506047a6a5ba538347305828a87b0","src/attr.rs":"234d9cebe2c5e92cd0f5e1117bf5755037e2e905788a337000a65d4bd82b63aa","src/await.rs":"8aa22e3c201cb2bdb6b4817fa00901f308ab06817607aa7b884c58c957705969","src/bigint.rs":"efc7f64959980653d73fe4f8bc2a3a2904dc05f45b02c6dc15cd316fa3d7c338","src/buffer.rs":"4d15f35273d485261be4f1a765ae03abc1daee9fc9dac5fb4f9b624d6b22cb58","src/custom_keyword.rs":"5c706fc3611e73d16b8c019d7ecb848a86b1ccfcd9e556f80bb6e6a4abe058a8","src/custom_punctuation.rs":"8a666298e774b0d326642f0f73284f6677d0d0a7c9e4a712c9c98d010b4d8a2c","src/data.rs":"75d2c2b5d6a01bf8a6fa2845e41663d8045a78b4b191f1a1bd7c93619d20017a","src/derive.rs":"ee24a202be2d36ccdff576dd9cd765e94b33ef2286946e6725d75b08e777d462","src/discouraged.rs":"6c6a9298f8d24f578da119557bc588f3bd928f7b79fca27d6bdfe3e786dd005f","src/drops.rs":"013385f1dd95663f1afab41abc1e2eea04181998644828935ca564c74d6462ae","src/error.rs":"b30e738fdab7d10e126350e09f7ad907bf4dc14e684b9ed9eeea001c7ee356e0","src/export.rs":"0cf50d70c32d5fddba8b1193032df62e560237c113df3e86ba26b565cc82838e","src/expr.rs":"5eea3828f3291b0ce5463ed5f0c23fc8a39aeceae68a3247ae02ae467dd35a98","src/ext.rs":"1f648cff1d705a1cea64b32b77482b97a82d2fe0aaf63b40cade91e5c02dc969","src/file.rs":"f86697655222ae294215114f4eae8e6b0b5e2a935d6c479ff8f8f889c4efd2e2","src/gen/clone.rs":"76e89fe155fedf43bc4a252af7e35319b82ce455f584bad8698fdc3f9b7f5d4e","src/gen/debug.rs":"4b05e474e864ce6bf1a5a6ab48ee6c0ecdf41a0d750237990cf2e31
963bc1208","src/gen/eq.rs":"79f84836fdcd5cfa352f38055dab7c3246c7757650946c1c701234b11021652a","src/gen/fold.rs":"fcd6a05c8c8e0c36e7ede8593002528b553c8b648fbed452106fd6a8a8c9212a","src/gen/hash.rs":"575e8beae303c1eabda12bf76cbd82672268c502a8ebb8517aab18b40fdbc44e","src/gen/visit.rs":"ced9f6c17d2b3eb3553faab710cb2b3d44d6bca7d1862c8c5da09c3d45debecb","src/gen/visit_mut.rs":"966ea340c53461bf8a1c6bed3c882e4ab8b8907fd18ac35531266f7891ae5f46","src/gen_helper.rs":"ea6c66388365971db6a2fc86cbb208f7eacde77e245bc8623f27a3642a3d7741","src/generics.rs":"b81ce0d3ea0f7accef4590d5181cecc4589395865abaea60b0470da727f17340","src/group.rs":"166f0fbb365471ffa3e4f554b72c2b460cbf7e3a1f9bec6c01ef6bbbcd751041","src/ident.rs":"2443e43561abea7eea577b141422258237a663499c839923d8a5ca6fea2470db","src/item.rs":"419c4d6135a7ca7b8f94b5ba038b6af8fcb3939ae807153a19e3c82e9b01e0b7","src/lib.rs":"8c152481907905472fc3e4aae63f82ed78d4d16cf8cc286675727668760c7f2e","src/lifetime.rs":"b18862ef1e690037a4f308ea897debad7bc5038584e3b26c6d8809752ea0e3c2","src/lit.rs":"fc06ddd523f7f9971d8abdb4c8d5d51030ffb3d6810615d5575ae210a7800695","src/lookahead.rs":"e2c2b6d55906421e83dab51463b58bc6dcb582f1bff9303c8b62afefb8d71e5f","src/mac.rs":"004cb89f9697564f6c9ee837e08ead68463ef946fb4c13c6c105adf2ba364b2b","src/macros.rs":"936f503c2fcde602f05220954ecaf87625c6138d0af13d33d56c7b6530110084","src/op.rs":"9d499022902743a6a0a19223b356449a979b90e60552d0446497d72750e646a4","src/parse.rs":"7b2f8caddf25a5734cbcdf7cbf043cbf9afbc07b484966cd59ddfcec9f970fb3","src/parse_macro_input.rs":"a5d16859b782bb6a2754c1066468a2f1ea05b57390caa32175bb84064973be7b","src/parse_quote.rs":"d7d996f1382c68b5fbfd4b7327ce1d389cd43c3bb3c4f382a35994d0bb79d8ab","src/pat.rs":"b2de04ae6c01df50eab9d1c3908287aca8424adc2007b926c7bcf74d1f64d40a","src/path.rs":"58a4fb3b1ff76d32cfd84a3914f8cadbf55b363c1929222b362b7465385520ac","src/print.rs":"da6529c1d9d21aaf6c835f66b4e67eacb7cf91a10eb5e9a2143b49bf99b3b5e1","src/punctuated.rs":"44c29523dee76605be2531674fe21ed2f1bbd02559
aac8b7a49c70af23129ca1","src/reserved.rs":"e70e028bd55cfa43e23cab4ba29e4dc53a3d91eff685ef2b6e57efc2b87a3428","src/sealed.rs":"896a495a5340eec898527f18bd4ddca408ea03ea0ee3af30074ff48deace778d","src/span.rs":"748c51c6feb223c26d3b1701f5bb98aee823666c775c98106cfa24fe29d8cec1","src/spanned.rs":"3ca016a943637653ab98e373dfb826a120f3c159867346fa38a844439944eb39","src/stmt.rs":"601a6914f1e0bf97ae0d31d474a531d195b8c251a4ded11aa8746ac0018d367b","src/thread.rs":"815eca6bd64f4eef7c447f0809e84108f5428ff50225224b373efd8fbb696874","src/token.rs":"5e423a696f80e281c322f37c87577f9fdc28607e9c007e24896a2b12da62d5ad","src/tt.rs":"32402645b6e82ef1e882945721b59b5fb7b0ee337d1972876362ecacef643d0f","src/ty.rs":"9befd22f8c8ac731b7f68008552a1335797a3ef19184190eec0e103e4ebe18a7","src/verbatim.rs":"96d4280e4556a1841b8dcb306bc35a94d18f71dceb63f3c27a4fe7f776191760","src/whitespace.rs":"e63dd0aa3d34029f17766a8b09c1a6e4479e36c552c8b7023d710a399333aace","tests/common/eq.rs":"e930fb0bdcec3e787986b56785b1db580e5a26a5131df2f2b91a6da37069de15","tests/common/mod.rs":"432ad35577f836a20b517d8c26ed994ac25fe73ef2f461c67688b61b99762015","tests/common/parse.rs":"81580f23583723f7a2a337c4d13ebc021057cd825562fb4e474caa7cc641fed9","tests/debug/gen.rs":"1b7f875344cb04a7dd3df62deac2f410a9d107c097986e68006d87465f5f5306","tests/debug/mod.rs":"3a6bb799f478101f71c84c6f1a854a58afe2f9db43c39017909346ca20262d94","tests/macros/mod.rs":"aff805b35cfd55aef6a1359ff747e4023afcb08d69d86aff4c19465d29dda088","tests/regression.rs":"86731134bfb9bb693d9a4fc62393027de80a8bf031109ea6c7ea475b1ebdde8d","tests/regression/issue1108.rs":"adcc55a42239d344da74216ed85fc14153ddd6ca4dec4872d8339604ba78c185","tests/regression/issue1235.rs":"a2266b10c3f7c7af5734817ab0a3e8b309b51e7d177b63f26e67e6b744d280b0","tests/repo/mod.rs":"159c2c4b6416d26ac42ffc35f6cb587c4c1e2b0f24de9aa42b0337a534d7d86d","tests/repo/progress.rs":"c08d0314a7f3ecf760d471f27da3cd2a500aeb9f1c8331bffb2aa648f9fabf3f","tests/test_asyncness.rs":"cff01db49d28ab23b0b258bc6c0a5cc4071be4fe7
248eef344a5d79d2fb649b7","tests/test_attribute.rs":"0ffd99384e1a52ae17d9fed5c4053e411e8f9018decef07ffa621d1faa7329d8","tests/test_derive_input.rs":"62bb86aaaaf730187a46ff700a8e3b2d1a163039b109b6a483aa44ed2b6806fe","tests/test_expr.rs":"41eb343829ad36cdea40cd06d45a90765e7fe6f1e47dd550daf1b6096c3a7b44","tests/test_generics.rs":"54b7d2afc19aa6e9049585f4c8f7d3f0c29ac3bd11a2c769e9df76f18a4f5ecb","tests/test_grouping.rs":"6276c3c73bba649dec5c97904ad2492879f918bc887a2c425d095c654ca0d925","tests/test_ident.rs":"9eb53d1e21edf23e7c9e14dc74dcc2b2538e9221e19dbcc0a44e3acc2e90f3f6","tests/test_item.rs":"a3642c80066f1e7787becfd0278af90a6b7968d6c1249e25e81663aa454cfb2a","tests/test_iterators.rs":"9cf6fde17853ce7d5617e1de9ef901c47ca35c0f1c2dd668c0d0604d7b48598c","tests/test_lit.rs":"19740ea9cd4a980bcab9b0dcaa4b032bb6ebb137fa5e4237140b97da1d9679fa","tests/test_meta.rs":"65d4586d131f6cac66694ca5e936748ec4e7f7423af6d8da509240e6be14800b","tests/test_parse_buffer.rs":"68d857f776396d064fcc0023c37093c2fbf75ee68e8241d4014d00d1423c18e9","tests/test_parse_stream.rs":"bf1db6fab7ac396fa61012faccbe6ffbc9c3d795ed2900be75e91c5b09b0c62f","tests/test_pat.rs":"d4465f4fc3fd5d6e534ba8efabe1e0ed6da89de4ac7c96effa6bfb880c4287cf","tests/test_path.rs":"71092a5ae2c9143b92a8fe15a92d39958b3c28bd4d4275cfb2d22cbdd53ada07","tests/test_precedence.rs":"736eee861c4c7a3d7d4387d2fb1b5eced1541790d34974f72b0a5532797e73c3","tests/test_receiver.rs":"084eca59984b9a18651da52f2c4407355da3de1335916a12477652999e2d01cc","tests/test_round_trip.rs":"c3c415413d5177a728c7cbbfb7ef44aebbc6a2c821dd56695156e9e33636fd57","tests/test_shebang.rs":"f5772cadad5b56e3112cb16308b779f92bce1c3a48091fc9933deb2276a69331","tests/test_should_parse.rs":"1d3535698a446e2755bfc360676bdb161841a1f454cdef6e7556c6d06a95c89d","tests/test_size.rs":"6720d55569808244ab011364c39931f06aa509cd05f98ab908b0670e8501b3c8","tests/test_stmt.rs":"0601fc32131b5501dfcdc4b4248d46bf21e0a98a49eb19439e1a46869dfb30b7","tests/test_token_trees.rs":"43e56a701817e3c3bfd0cae54a457d
d7a38ccb3ca19da41e2b995fdf20e6ed18","tests/test_ty.rs":"f71d7f7f1c038aaabea8dd4c03c0d5752c76d570f8b4885a81659825bbb4d576","tests/test_visibility.rs":"7456fcb3a6634db509748aededff9c2d8b242d511a3e5ee3022e40b232892704","tests/zzz_stable.rs":"2a862e59cb446235ed99aec0e6ada8e16d3ecc30229b29d825b7c0bbc2602989"},"package":"1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"} \ No newline at end of file +{"files":{"Cargo.toml":"07a5542d87a02be22d8d80182280f622a65263ea468350ba6137539f7bca1a8f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"33fd74d909172770aaf840c519f7c59ef185a4a8d21c1e5e4dcd6a398e7e1e61","benches/file.rs":"0a0527c78d849148cbb6118b4d36f72da7d4add865ba1a410e0a1be9e8dbfe0e","benches/rust.rs":"cc2f7ce7b547b746b02215c8eabeb82697bff9d54fabec70156b54f6dc6492cd","src/attr.rs":"bd5ffae18a363162f7d9c12a1b6c1d023070cbf1b060c98ebc38ef79f1de9c67","src/bigint.rs":"0299829b2f7a1a798fe2f7bc1680e4a10f9b6f4a852d09af4da2deab466c4242","src/buffer.rs":"634fed0b398163581d27b4693a481ffcef891c8e274d9b4574482a644ef9fce9","src/custom_keyword.rs":"b82199b98f67ed5c0025f5e8791b8c9a755522e54aa5ab8fbab2b01b36fdb400","src/custom_punctuation.rs":"39b38bc18553aa902a5ce842f503390c30e259b4404d5fb63d2401af7c73b527","src/data.rs":"7d217b0252a0d14b2db308ac00f48ba24a831e01a49b893f5b3ee6b580dab4cb","src/derive.rs":"3132e7f064725c7ca43f26daee93ec78037d46a935c6b0758af905cff450c15c","src/discouraged.rs":"482970b03bdee3cbc30c034f644e3293b25387db46300da5d8d8efd97dad8507","src/drops.rs":"013385f1dd95663f1afab41abc1e2eea04181998644828935ca564c74d6462ae","src/error.rs":"c75089eeb5c0a231e747cbe479e84a379841a6e8d61fd072347cfae09c8781ec","src/export.rs":"6785b6329d7bc0a5b0efbf9d28af7cdbdfe279ae9a0e21ef177b144ed6188b66","src/expr.rs":"5b90b619ec48704627adefeb437793d41148f62e94d4b1e80ad81b8ee5085a14","src/ext.rs":"3cf2d869812e5be894aa1c48bf074da262143fb2df1c9ac1b5ee965b
f2a96a1c","src/file.rs":"a4d510dd0e2756bd54983dfa747601918c801e987cbf92deab44cdca6a201aeb","src/gen/clone.rs":"46540509dc99bb849014948a0c5b02ea372d5feceae5ea391c29f226f06516eb","src/gen/debug.rs":"32b2076b755f021428a0fb268a94057e1bcb1cd400feb895946703d7919b843a","src/gen/eq.rs":"aa5455b2cc0d9846d119ce001e821872df911f65133b993e3801a42e8f635f2a","src/gen/fold.rs":"45ac5b6915d5214fa1e9af84621584443f599f838ed936fa8bda3b68a9cc4b6a","src/gen/hash.rs":"4ca8239c681ea5fd7b16bb61bff9034bff09680c088f5a16e90e99013e55742f","src/gen/visit.rs":"0a10ef3a2c5cae7aed83e8ffb5da9f9c85e0fdbae82025cc411f6328bf7fda9e","src/gen/visit_mut.rs":"1f6cfa463da0f970063e70831e3ff6b07d725c77c6e20ece17c0731d90d5b4a4","src/gen_helper.rs":"750caab67ba0ba11a95ea28cd38026485227bb4aa114cdb497472386f60fdb35","src/generics.rs":"d080112c1d3084e9d701ab628cfa77881ed9398c638ba40c7e4135d9b3f1e784","src/group.rs":"fb7f24019ab612ba85f091c4edda3b2f0154f39caa18c9a139ee600afffbeefa","src/ident.rs":"711647537aee87d7249bbcdeb2cc90d146937998dd435395c85c6b18a10b5e07","src/item.rs":"6f9c8c8bd6f1a30d39e9df5e8be978c3d2d727df64c5e64fb34199f770df6a2f","src/lib.rs":"a2c3d09def47c5788759a5e2762b81521b2a90d9ecf8816d0a4165ca0729d98e","src/lifetime.rs":"531ef74507eaf942a3aedfac83bbdbc17463102a6c806f675a83a0d6dc612c52","src/lit.rs":"72214440bdfa844aa86853aec42cd6900dff47a3cab4bc8d83ad205a115c09ce","src/lookahead.rs":"376092f91a1c32e1b277db0a6790fdda151c9ec51bd971fe6a6545b5b9e73b5d","src/mac.rs":"b1cf73f34a27a8f1429125e726623a524fb5dce875eb68ead3beaffa976442c3","src/macros.rs":"4e464104c590200213635624706d83e4a0ddd5aedd826ab4aabb390000f35ae0","src/meta.rs":"43c9d06f222f5323087bb668d8b5c1cd4fdef772db1b433c9b991ea026649699","src/op.rs":"fe5db7c3373b956234ea8a1a7d129a06e5aef5db77c44c1c2fedb4aaa667ac56","src/parse.rs":"07dafec0038234eba0c15845bd85f3250f41dce6d013f49e2364666bb9732bae","src/parse_macro_input.rs":"4a753b2a6dbfefd6dc93852d66b4f6d73ebd6b8b9be74019fc476f429b9a892d","src/parse_quote.rs":"60eff4d03bf4f5977be86f49faad16d6713121f
69bedd868f951bbcabf443d66","src/pat.rs":"cae5d096a31f7dfe96213f6d83a6c717ef5e2ef4a10793f4d28e2099e6ee404b","src/path.rs":"8dcedaab7ca9e9bc901fb74079e35bfca6ff9e45bc5ca75af1008c087a2c24c8","src/print.rs":"22910bf0521ab868ebd7c62601c55912d12cfb400c65723e08e5cfa3a2d111c0","src/punctuated.rs":"6c072f20c5ff0eda8916e94c415c8fd62e113faf87316be4b6e5ca64042b6b01","src/restriction.rs":"62efbc127d7e7316dd1070c0e976872de6238b2602bba1fb35df18511b4e7199","src/sealed.rs":"6ece3b3dcb30f6bb98b93d83759ca7712ee8592bef9c0511141039c38765db0e","src/span.rs":"4c13579eaf94803bcdb98696e4c3e26fd5cfb7ad46e5a727ed087e5935530a59","src/spanned.rs":"311f4ca8ab9d436df8861a8ea3411d8eff0920354457e124ac85d0579c074981","src/stmt.rs":"acd8ad6406a8e0c11de789f4907d127bdbe8fdf2be68de957298905492ec195c","src/thread.rs":"32f1d8a9890a15920bb939e51647a6630c0661c3fae282834394e4437b8aa5df","src/token.rs":"8b0b4535972fb7b3640e27cb54f80d0e61f27334f2c4c2226c6bae7958299527","src/tt.rs":"32490509abcc4a5a3c7eb5628337172b3b49d30697d2f7b7df4d8045255c13da","src/ty.rs":"6b0185102966685329c1797c6e6bbac47ffe91cb8d68218f454443ba5d252206","src/verbatim.rs":"8d2a42a0aad2a5e69d9b32ba7fb3564fce003fe0862dbc01e106f15d951f3060","src/whitespace.rs":"718a80c12cdd145358e2690f0f68ff7779a91ec17ce9fde9bb755f635fce69ad","tests/common/eq.rs":"d130722a0fe5379c34f163cd7c46678ad902e39be7abaa552155609d22128713","tests/common/mod.rs":"432ad35577f836a20b517d8c26ed994ac25fe73ef2f461c67688b61b99762015","tests/common/parse.rs":"246ddf1d303a9dbbc380e8d0689bd851cef3c3146d09d2627175deb9203b003d","tests/debug/gen.rs":"0b689be01a4f4a0d168617b0f867f248a9e3d211e259926e6ec6c10a59776d81","tests/debug/mod.rs":"dd87563bbd359401790a9c4185178539929ff9fa35a6998657af82a85731fe4c","tests/macros/mod.rs":"aff805b35cfd55aef6a1359ff747e4023afcb08d69d86aff4c19465d29dda088","tests/regression.rs":"e9565ea0efecb4136f099164ffcfa26e1996b0a27fb9c6659e90ad9bdd42e7b6","tests/regression/issue1108.rs":"f32db35244a674e22ff824ca9e5bbec2184e287b59f022db68c418b5878a2edc","tests/reg
ression/issue1235.rs":"a2266b10c3f7c7af5734817ab0a3e8b309b51e7d177b63f26e67e6b744d280b0","tests/repo/mod.rs":"c624f94ac3238a4231dd884daf330979ccd600b2169cc76ddd2306aeebfae8d9","tests/repo/progress.rs":"c08d0314a7f3ecf760d471f27da3cd2a500aeb9f1c8331bffb2aa648f9fabf3f","tests/test_asyncness.rs":"3868181f25f7470476077f80a442a7804b6b9b371ad5917f4fd18b1002714c64","tests/test_attribute.rs":"b35550a43bbd187bb330997ba36f90c65d8fc489135b1d32ef4547f145cb7612","tests/test_derive_input.rs":"c215245c4d09052661ac5b65b34e950ea47622847bdffe648d380470f12db8f2","tests/test_expr.rs":"1d8688c51d4e8dd5a288722ec8c074320081756fcc83812f23109dffe0caddbf","tests/test_generics.rs":"b77741aa38e6ac7e1a9082faf168e7b7b92fbabf9f3fd07306676339a67394df","tests/test_grouping.rs":"ecbe3324878b2e2be42640a3dec198620cff18731fcb95ee7e94eacd11d2fec1","tests/test_ident.rs":"9eb53d1e21edf23e7c9e14dc74dcc2b2538e9221e19dbcc0a44e3acc2e90f3f6","tests/test_item.rs":"7f0255b61d0a6921313c09aaba470beefc55f1d4e66d1e24cfac7a3f63b035d8","tests/test_iterators.rs":"f4dacb5f3a8e0473dfb0d27f05270d41e79eddb4759b1fad3e88e379b4731e17","tests/test_lit.rs":"7297fed48ca248689f112f67b6f024f2f2784e29c6cd33185ac659c350834b01","tests/test_meta.rs":"3e1bb60b4bd56adb1e04b0e2d867404f0d81f7bf69caf7d8a70fc7090e079e84","tests/test_parse_buffer.rs":"3ed83ea2e50f84b80c0b543aac4bfbd379610d0911c0baa1eb94bb925bda7341","tests/test_parse_stream.rs":"a7e186272c89a239cae03053b5a039cdc073cdb46fad64b178fe76fde98405d5","tests/test_pat.rs":"fe94e084ee478d41cccea4eeb3e975386a70d36ff7cbb902ba0c767d536aab6e","tests/test_path.rs":"0033e1082b576bb3217ebd4546423d6f86fde7ee7ba3aba8c57bf137d2b42f47","tests/test_precedence.rs":"1395b213a1aa953a3b2eacc922853f8d0e3afba552325440bfbe4df6b62102a1","tests/test_receiver.rs":"af64117acd66fbf42edc476f731ecd20c88009d9cb641dbd7a1d6384ae99ae73","tests/test_round_trip.rs":"b9f133540847a04e80f6f5264290633ebdd00d058a7b1a626929966786ffbe98","tests/test_shebang.rs":"06d3acabed004767d8b3a0389bde7485a6719cad6a0d0b4ac2c7439b03586
651","tests/test_should_parse.rs":"1d3535698a446e2755bfc360676bdb161841a1f454cdef6e7556c6d06a95c89d","tests/test_size.rs":"a4db2760e9438ef1a26d70238c2b5044de02878403fd424b4c9913b5ee45e004","tests/test_stmt.rs":"42a3707056da0ce3a01f8fb13e8b7631f9be6066627ac376e1874742419ad2cc","tests/test_token_trees.rs":"d012da9c3c861073711b006bf6ffdc073821fb9fb0a08733628cdae57124d1f5","tests/test_ty.rs":"f7f21f76e9e798669f09a95c380e26ae5959ee8ac5f3b765b1a799cc9505d075","tests/test_visibility.rs":"cf4c93997cd88821ef7f8e2dd5d1586175cce4614407cd3bdf371ecc4d8abc44","tests/zzz_stable.rs":"2a862e59cb446235ed99aec0e6ada8e16d3ecc30229b29d825b7c0bbc2602989"},"package":"a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"} \ No newline at end of file diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/Cargo.toml clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/Cargo.toml --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/Cargo.toml 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/Cargo.toml 2023-08-15 22:24:19.000000000 +0000 @@ -10,14 +10,13 @@ # See Cargo.toml.orig for the original contents. 
[package] -edition = "2018" -rust-version = "1.31" +edition = "2021" +rust-version = "1.56" name = "syn" -version = "1.0.107" +version = "2.0.15" authors = ["David Tolnay "] include = [ "/benches/**", - "/build.rs", "/Cargo.toml", "/LICENSE-APACHE", "/LICENSE-MIT", @@ -41,11 +40,11 @@ [package.metadata.docs.rs] all-features = true -targets = ["x86_64-unknown-linux-gnu"] rustdoc-args = [ "--cfg", "doc_cfg", ] +targets = ["x86_64-unknown-linux-gnu"] [package.metadata.playground] features = [ @@ -75,42 +74,45 @@ ] [dependencies.proc-macro2] -version = "1.0.46" +version = "1.0.55" default-features = false [dependencies.quote] -version = "1.0" +version = "1.0.25" optional = true default-features = false [dependencies.unicode-ident] -version = "1.0" +version = "1" [dev-dependencies.anyhow] -version = "1.0" +version = "1" [dev-dependencies.automod] -version = "1.0" +version = "1" [dev-dependencies.flate2] -version = "1.0" +version = "1" [dev-dependencies.insta] -version = "1.0" +version = "1" [dev-dependencies.rayon] -version = "1.0" +version = "1" [dev-dependencies.ref-cast] -version = "1.0" +version = "1" [dev-dependencies.regex] -version = "1.0" +version = "1" [dev-dependencies.reqwest] version = "0.11" features = ["blocking"] +[dev-dependencies.rustversion] +version = "1" + [dev-dependencies.syn-test-suite] version = "0" @@ -118,10 +120,10 @@ version = "0.4.16" [dev-dependencies.termcolor] -version = "1.0" +version = "1" [dev-dependencies.walkdir] -version = "2.1" +version = "2.3.2" [features] clone-impls = [] diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/LICENSE-APACHE clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/LICENSE-APACHE --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/LICENSE-APACHE 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/LICENSE-APACHE 2023-08-15 22:24:19.000000000 +0000 @@ -174,28 +174,3 @@ of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/README.md clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/README.md --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/README.md 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/README.md 2023-08-15 22:24:19.000000000 +0000 @@ -46,7 +46,7 @@ [`syn::DeriveInput`]: https://docs.rs/syn/1.0/syn/struct.DeriveInput.html [parser functions]: https://docs.rs/syn/1.0/syn/parse/index.html -*Version requirement: Syn supports rustc 1.31 and up.* +*Version requirement: Syn supports rustc 1.56 and up.* [*Release notes*](https://github.com/dtolnay/syn/releases) @@ -76,7 +76,7 @@ ```toml [dependencies] -syn = "1.0" +syn = "2.0" quote = "1.0" [lib] @@ -104,9 +104,8 @@ ``` The [`heapsize`] example directory shows a complete working implementation of a -derive macro. It works on any Rust compiler 1.31+. The example derives a -`HeapSize` trait which computes an estimate of the amount of heap memory owned -by a value. +derive macro. The example derives a `HeapSize` trait which computes an estimate +of the amount of heap memory owned by a value. [`heapsize`]: examples/heapsize diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/attr.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/attr.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/attr.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/attr.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,19 +1,15 @@ use super::*; -use crate::punctuated::Punctuated; use proc_macro2::TokenStream; use std::iter; use std::slice; #[cfg(feature = "parsing")] -use crate::parse::{Parse, ParseBuffer, ParseStream, Parser, Result}; +use crate::meta::{self, ParseNestedMeta}; #[cfg(feature = "parsing")] -use crate::punctuated::Pair; +use crate::parse::{Parse, ParseStream, Parser, Result}; ast_struct! { - /// An attribute like `#[repr(transparent)]`. 
- /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* + /// An attribute, like `#[repr(transparent)]`. /// ///
/// @@ -23,27 +19,52 @@ /// /// - Outer attributes like `#[repr(transparent)]`. These appear outside or /// in front of the item they describe. + /// /// - Inner attributes like `#![feature(proc_macro)]`. These appear inside /// of the item they describe, usually a module. - /// - Outer doc comments like `/// # Example`. - /// - Inner doc comments like `//! Please file an issue`. - /// - Outer block comments `/** # Example */`. - /// - Inner block comments `/*! Please file an issue */`. + /// + /// - Outer one-line doc comments like `/// Example`. + /// + /// - Inner one-line doc comments like `//! Please file an issue`. + /// + /// - Outer documentation blocks `/** Example */`. + /// + /// - Inner documentation blocks `/*! Please file an issue */`. /// /// The `style` field of type `AttrStyle` distinguishes whether an attribute - /// is outer or inner. Doc comments and block comments are promoted to - /// attributes, as this is how they are processed by the compiler and by - /// `macro_rules!` macros. + /// is outer or inner. /// - /// The `path` field gives the possibly colon-delimited path against which - /// the attribute is resolved. It is equal to `"doc"` for desugared doc - /// comments. The `tokens` field contains the rest of the attribute body as - /// tokens. + /// Every attribute has a `path` that indicates the intended interpretation + /// of the rest of the attribute's contents. The path and the optional + /// additional contents are represented together in the `meta` field of the + /// attribute in three possible varieties: + /// + /// - Meta::Path — attributes whose information content conveys just a + /// path, for example the `#[test]` attribute. + /// + /// - Meta::List — attributes that carry arbitrary tokens after the + /// path, surrounded by a delimiter (parenthesis, bracket, or brace). For + /// example `#[derive(Copy)]` or `#[precondition(x < 5)]`. 
+ /// + /// - Meta::NameValue — attributes with an `=` sign after the path, + /// followed by a Rust expression. For example `#[path = + /// "sys/windows.rs"]`. + /// + /// All doc comments are represented in the NameValue style with a path of + /// "doc", as this is how they are processed by the compiler and by + /// `macro_rules!` macros. /// /// ```text - /// #[derive(Copy)] #[crate::precondition x < 5] - /// ^^^^^^~~~~~~ ^^^^^^^^^^^^^^^^^^^ ~~~~~ - /// path tokens path tokens + /// #[derive(Copy, Clone)] + /// ~~~~~~Path + /// ^^^^^^^^^^^^^^^^^^^Meta::List + /// + /// #[path = "sys/windows.rs"] + /// ~~~~Path + /// ^^^^^^^^^^^^^^^^^^^^^^^Meta::NameValue + /// + /// #[test] + /// ^^^^Meta::Path /// ``` /// ///
@@ -93,18 +114,13 @@ /// /// The grammar of attributes in Rust is very flexible, which makes the /// syntax tree not that useful on its own. In particular, arguments of the - /// attribute are held in an arbitrary `tokens: TokenStream`. Macros are - /// expected to check the `path` of the attribute, decide whether they - /// recognize it, and then parse the remaining tokens according to whatever - /// grammar they wish to require for that kind of attribute. - /// - /// If the attribute you are parsing is expected to conform to the - /// conventional structured form of attribute, use [`parse_meta()`] to - /// obtain that structured representation. If the attribute follows some - /// other grammar of its own, use [`parse_args()`] to parse that into the - /// expected data structure. + /// `Meta::List` variety of attribute are held in an arbitrary `tokens: + /// TokenStream`. Macros are expected to check the `path` of the attribute, + /// decide whether they recognize it, and then parse the remaining tokens + /// according to whatever grammar they wish to require for that kind of + /// attribute. Use [`parse_args()`] to parse those tokens into the expected + /// data structure. /// - /// [`parse_meta()`]: Attribute::parse_meta /// [`parse_args()`]: Attribute::parse_args /// ///


@@ -150,65 +166,49 @@ pub pound_token: Token![#], pub style: AttrStyle, pub bracket_token: token::Bracket, - pub path: Path, - pub tokens: TokenStream, + pub meta: Meta, } } impl Attribute { - /// Parses the content of the attribute, consisting of the path and tokens, - /// as a [`Meta`] if possible. + /// Returns the path that identifies the interpretation of this attribute. /// - /// *This function is available only if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] - pub fn parse_meta(&self) -> Result { - fn clone_ident_segment(segment: &PathSegment) -> PathSegment { - PathSegment { - ident: segment.ident.clone(), - arguments: PathArguments::None, - } - } - - let path = Path { - leading_colon: self - .path - .leading_colon - .as_ref() - .map(|colon| Token![::](colon.spans)), - segments: self - .path - .segments - .pairs() - .map(|pair| match pair { - Pair::Punctuated(seg, punct) => { - Pair::Punctuated(clone_ident_segment(seg), Token![::](punct.spans)) - } - Pair::End(seg) => Pair::End(clone_ident_segment(seg)), - }) - .collect(), - }; - - let parser = |input: ParseStream| parsing::parse_meta_after_path(path, input); - parse::Parser::parse2(parser, self.tokens.clone()) + /// For example this would return the `test` in `#[test]`, the `derive` in + /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. + pub fn path(&self) -> &Path { + self.meta.path() } /// Parse the arguments to the attribute as a syntax tree. /// - /// This is similar to `syn::parse2::(attr.tokens)` except that: + /// This is similar to pulling out the `TokenStream` from `Meta::List` and + /// doing `syn::parse2::(meta_list.tokens)`, except that using + /// `parse_args` the error message has a more useful span when `tokens` is + /// empty. 
/// - /// - the surrounding delimiters are *not* included in the input to the - /// parser; and - /// - the error message has a more useful span when `tokens` is empty. + /// The surrounding delimiters are *not* included in the input to the + /// parser. /// /// ```text /// #[my_attr(value < 5)] /// ^^^^^^^^^ what gets parsed /// ``` /// - /// *This function is available only if Syn is built with the `"parsing"` - /// feature.* + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute, Expr}; + /// + /// let attr: Attribute = parse_quote! { + /// #[precondition(value < 5)] + /// }; + /// + /// if attr.path().is_ident("precondition") { + /// let precondition: Expr = attr.parse_args()?; + /// // ... + /// } + /// # anyhow::Ok(()) + /// ``` #[cfg(feature = "parsing")] #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] pub fn parse_args(&self) -> Result { @@ -217,22 +217,182 @@ /// Parse the arguments to the attribute using the given parser. /// - /// *This function is available only if Syn is built with the `"parsing"` - /// feature.* + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute}; + /// + /// let attr: Attribute = parse_quote! 
{ + /// #[inception { #[brrrrrrraaaaawwwwrwrrrmrmrmmrmrmmmmm] }] + /// }; + /// + /// let bwom = attr.parse_args_with(Attribute::parse_outer)?; + /// + /// // Attribute does not have a Parse impl, so we couldn't directly do: + /// // let bwom: Attribute = attr.parse_args()?; + /// # anyhow::Ok(()) + /// ``` #[cfg(feature = "parsing")] #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] pub fn parse_args_with(&self, parser: F) -> Result { - let parser = |input: ParseStream| { - let args = enter_args(self, input)?; - parse::parse_stream(parser, &args) - }; - parser.parse2(self.tokens.clone()) + match &self.meta { + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected attribute arguments in parentheses: {}[{}(...)]", + parsing::DisplayAttrStyle(&self.style), + parsing::DisplayPath(path), + ), + )), + Meta::NameValue(meta) => Err(Error::new( + meta.eq_token.span, + format_args!( + "expected parentheses: {}[{}(...)]", + parsing::DisplayAttrStyle(&self.style), + parsing::DisplayPath(&meta.path), + ), + )), + Meta::List(meta) => meta.parse_args_with(parser), + } + } + + /// Parse the arguments to the attribute, expecting it to follow the + /// conventional structure used by most of Rust's built-in attributes. + /// + /// The [*Meta Item Attribute Syntax*][syntax] section in the Rust reference + /// explains the convention in more detail. Not all attributes follow this + /// convention, so [`parse_args()`][Self::parse_args] is available if you + /// need to parse arbitrarily goofy attribute syntax. + /// + /// [syntax]: https://doc.rust-lang.org/reference/attributes.html#meta-item-attribute-syntax + /// + /// # Example + /// + /// We'll parse a struct, and then parse some of Rust's `#[repr]` attribute + /// syntax. + /// + /// ``` + /// use syn::{parenthesized, parse_quote, token, ItemStruct, LitInt}; + /// + /// let input: ItemStruct = parse_quote! 
{ + /// #[repr(C, align(4))] + /// pub struct MyStruct(u16, u32); + /// }; + /// + /// let mut repr_c = false; + /// let mut repr_transparent = false; + /// let mut repr_align = None::; + /// let mut repr_packed = None::; + /// for attr in &input.attrs { + /// if attr.path().is_ident("repr") { + /// attr.parse_nested_meta(|meta| { + /// // #[repr(C)] + /// if meta.path.is_ident("C") { + /// repr_c = true; + /// return Ok(()); + /// } + /// + /// // #[repr(transparent)] + /// if meta.path.is_ident("transparent") { + /// repr_transparent = true; + /// return Ok(()); + /// } + /// + /// // #[repr(align(N))] + /// if meta.path.is_ident("align") { + /// let content; + /// parenthesized!(content in meta.input); + /// let lit: LitInt = content.parse()?; + /// let n: usize = lit.base10_parse()?; + /// repr_align = Some(n); + /// return Ok(()); + /// } + /// + /// // #[repr(packed)] or #[repr(packed(N))], omitted N means 1 + /// if meta.path.is_ident("packed") { + /// if meta.input.peek(token::Paren) { + /// let content; + /// parenthesized!(content in meta.input); + /// let lit: LitInt = content.parse()?; + /// let n: usize = lit.base10_parse()?; + /// repr_packed = Some(n); + /// } else { + /// repr_packed = Some(1); + /// } + /// return Ok(()); + /// } + /// + /// Err(meta.error("unrecognized repr")) + /// })?; + /// } + /// } + /// # anyhow::Ok(()) + /// ``` + /// + /// # Alternatives + /// + /// In some cases, for attributes which have nested layers of structured + /// content, the following less flexible approach might be more convenient: + /// + /// ``` + /// # use syn::{parse_quote, ItemStruct}; + /// # + /// # let input: ItemStruct = parse_quote! 
{ + /// # #[repr(C, align(4))] + /// # pub struct MyStruct(u16, u32); + /// # }; + /// # + /// use syn::punctuated::Punctuated; + /// use syn::{parenthesized, token, Error, LitInt, Meta, Token}; + /// + /// let mut repr_c = false; + /// let mut repr_transparent = false; + /// let mut repr_align = None::; + /// let mut repr_packed = None::; + /// for attr in &input.attrs { + /// if attr.path().is_ident("repr") { + /// let nested = attr.parse_args_with(Punctuated::::parse_terminated)?; + /// for meta in nested { + /// match meta { + /// // #[repr(C)] + /// Meta::Path(path) if path.is_ident("C") => { + /// repr_c = true; + /// } + /// + /// // #[repr(align(N))] + /// Meta::List(meta) if meta.path.is_ident("align") => { + /// let lit: LitInt = meta.parse_args()?; + /// let n: usize = lit.base10_parse()?; + /// repr_align = Some(n); + /// } + /// + /// /* ... */ + /// + /// _ => { + /// return Err(Error::new_spanned(meta, "unrecognized repr")); + /// } + /// } + /// } + /// } + /// } + /// # Ok(()) + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn parse_nested_meta( + &self, + logic: impl FnMut(ParseNestedMeta) -> Result<()>, + ) -> Result<()> { + self.parse_args_with(meta::parser(logic)) } /// Parses zero or more outer attributes from the stream. /// - /// *This function is available only if Syn is built with the `"parsing"` - /// feature.* + /// # Example + /// + /// See + /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). #[cfg(feature = "parsing")] #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] pub fn parse_outer(input: ParseStream) -> Result> { @@ -245,8 +405,10 @@ /// Parses zero or more inner attributes from the stream. /// - /// *This function is available only if Syn is built with the `"parsing"` - /// feature.* + /// # Example + /// + /// See + /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). 
#[cfg(feature = "parsing")] #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] pub fn parse_inner(input: ParseStream) -> Result> { @@ -256,65 +418,10 @@ } } -#[cfg(feature = "parsing")] -fn expected_parentheses(attr: &Attribute) -> String { - let style = match attr.style { - AttrStyle::Outer => "#", - AttrStyle::Inner(_) => "#!", - }; - - let mut path = String::new(); - for segment in &attr.path.segments { - if !path.is_empty() || attr.path.leading_colon.is_some() { - path += "::"; - } - path += &segment.ident.to_string(); - } - - format!("{}[{}(...)]", style, path) -} - -#[cfg(feature = "parsing")] -fn enter_args<'a>(attr: &Attribute, input: ParseStream<'a>) -> Result> { - if input.is_empty() { - let expected = expected_parentheses(attr); - let msg = format!("expected attribute arguments in parentheses: {}", expected); - return Err(crate::error::new2( - attr.pound_token.span, - attr.bracket_token.span, - msg, - )); - } else if input.peek(Token![=]) { - let expected = expected_parentheses(attr); - let msg = format!("expected parentheses: {}", expected); - return Err(input.error(msg)); - }; - - let content; - if input.peek(token::Paren) { - parenthesized!(content in input); - } else if input.peek(token::Bracket) { - bracketed!(content in input); - } else if input.peek(token::Brace) { - braced!(content in input); - } else { - return Err(input.error("unexpected token in attribute arguments")); - } - - if input.is_empty() { - Ok(content) - } else { - Err(input.error("unexpected token in attribute arguments")) - } -} - ast_enum! { /// Distinguishes between attributes that decorate an item and attributes /// that are contained within an item. /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// /// # Outer attributes /// /// - `#[repr(transparent)]` @@ -336,9 +443,6 @@ ast_enum_of_structs! { /// Content of a compile-time structured attribute. 
/// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// /// ## Path /// /// A meta path is like the `test` in `#[test]`. @@ -371,32 +475,26 @@ ast_struct! { /// A structured list within an attribute, like `derive(Copy, Clone)`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct MetaList { pub path: Path, - pub paren_token: token::Paren, - pub nested: Punctuated, + pub delimiter: MacroDelimiter, + pub tokens: TokenStream, } } ast_struct! { /// A name-value pair within an attribute, like `feature = "nightly"`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct MetaNameValue { pub path: Path, pub eq_token: Token![=], - pub lit: Lit, + pub value: Expr, } } impl Meta { - /// Returns the identifier that begins this structured meta item. + /// Returns the path that begins this structured meta item. /// /// For example this would return the `test` in `#[test]`, the `derive` in /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. @@ -407,63 +505,84 @@ Meta::NameValue(meta) => &meta.path, } } -} -ast_enum_of_structs! { - /// Element of a compile-time attribute list. - /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* - #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum NestedMeta { - /// A structured meta item, like the `Copy` in `#[derive(Copy)]` which - /// would be a nested `Meta::Path`. - Meta(Meta), - - /// A Rust literal, like the `"new_name"` in `#[rename("new_name")]`. - Lit(Lit), + /// Error if this is a `Meta::List` or `Meta::NameValue`. 
+ #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn require_path_only(&self) -> Result<&Path> { + let error_span = match self { + Meta::Path(path) => return Ok(path), + Meta::List(meta) => meta.delimiter.span().open(), + Meta::NameValue(meta) => meta.eq_token.span, + }; + Err(Error::new(error_span, "unexpected token in attribute")) + } + + /// Error if this is a `Meta::Path` or `Meta::NameValue`. + #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn require_list(&self) -> Result<&MetaList> { + match self { + Meta::List(meta) => Ok(meta), + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected attribute arguments in parentheses: `{}(...)`", + parsing::DisplayPath(path), + ), + )), + Meta::NameValue(meta) => Err(Error::new(meta.eq_token.span, "expected `(`")), + } + } + + /// Error if this is a `Meta::Path` or `Meta::List`. + #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn require_name_value(&self) -> Result<&MetaNameValue> { + match self { + Meta::NameValue(meta) => Ok(meta), + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected a value for this attribute: `{} = ...`", + parsing::DisplayPath(path), + ), + )), + Meta::List(meta) => Err(Error::new(meta.delimiter.span().open(), "expected `=`")), + } } } -/// Conventional argument type associated with an invocation of an attribute -/// macro. -/// -/// For example if we are developing an attribute macro that is intended to be -/// invoked on function items as follows: -/// -/// ``` -/// # const IGNORE: &str = stringify! { -/// #[my_attribute(path = "/v1/refresh")] -/// # }; -/// pub fn refresh() { -/// /* ... 
*/ -/// } -/// ``` -/// -/// The implementation of this macro would want to parse its attribute arguments -/// as type `AttributeArgs`. -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, AttributeArgs, ItemFn}; -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_attribute] -/// # }; -/// pub fn my_attribute(args: TokenStream, input: TokenStream) -> TokenStream { -/// let args = parse_macro_input!(args as AttributeArgs); -/// let input = parse_macro_input!(input as ItemFn); -/// -/// /* ... */ -/// # "".parse().unwrap() -/// } -/// ``` -#[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] -pub type AttributeArgs = Vec; +impl MetaList { + /// See [`Attribute::parse_args`]. + #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn parse_args(&self) -> Result { + self.parse_args_with(T::parse) + } -pub trait FilterAttrs<'a> { + /// See [`Attribute::parse_args_with`]. + #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn parse_args_with(&self, parser: F) -> Result { + let scope = self.delimiter.span().close(); + crate::parse::parse_scoped(parser, scope, self.tokens.clone()) + } + + /// See [`Attribute::parse_nested_meta`]. 
+ #[cfg(feature = "parsing")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] + pub fn parse_nested_meta( + &self, + logic: impl FnMut(ParseNestedMeta) -> Result<()>, + ) -> Result<()> { + self.parse_args_with(meta::parser(logic)) + } +} + +pub(crate) trait FilterAttrs<'a> { type Ret: Iterator; fn outer(self) -> Self::Ret; @@ -495,69 +614,43 @@ } #[cfg(feature = "parsing")] -pub mod parsing { +pub(crate) mod parsing { use super::*; - use crate::ext::IdentExt; + use crate::parse::discouraged::Speculative; use crate::parse::{Parse, ParseStream, Result}; + use std::fmt::{self, Display}; - pub fn parse_inner(input: ParseStream, attrs: &mut Vec) -> Result<()> { + pub(crate) fn parse_inner(input: ParseStream, attrs: &mut Vec) -> Result<()> { while input.peek(Token![#]) && input.peek2(Token![!]) { attrs.push(input.call(parsing::single_parse_inner)?); } Ok(()) } - pub fn single_parse_inner(input: ParseStream) -> Result { + pub(crate) fn single_parse_inner(input: ParseStream) -> Result { let content; Ok(Attribute { pound_token: input.parse()?, style: AttrStyle::Inner(input.parse()?), bracket_token: bracketed!(content in input), - path: content.call(Path::parse_mod_style)?, - tokens: content.parse()?, + meta: content.parse()?, }) } - pub fn single_parse_outer(input: ParseStream) -> Result { + pub(crate) fn single_parse_outer(input: ParseStream) -> Result { let content; Ok(Attribute { pound_token: input.parse()?, style: AttrStyle::Outer, bracket_token: bracketed!(content in input), - path: content.call(Path::parse_mod_style)?, - tokens: content.parse()?, - }) - } - - // Like Path::parse_mod_style but accepts keywords in the path. 
- fn parse_meta_path(input: ParseStream) -> Result { - Ok(Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - while input.peek(Ident::peek_any) { - let ident = Ident::parse_any(input)?; - segments.push_value(PathSegment::from(ident)); - if !input.peek(Token![::]) { - break; - } - let punct = input.parse()?; - segments.push_punct(punct); - } - if segments.is_empty() { - return Err(input.error("expected path")); - } else if segments.trailing_punct() { - return Err(input.error("expected path segment")); - } - segments - }, + meta: content.parse()?, }) } #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] impl Parse for Meta { fn parse(input: ParseStream) -> Result { - let path = input.call(parse_meta_path)?; + let path = input.call(Path::parse_mod_style)?; parse_meta_after_path(path, input) } } @@ -565,7 +658,7 @@ #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] impl Parse for MetaList { fn parse(input: ParseStream) -> Result { - let path = input.call(parse_meta_path)?; + let path = input.call(Path::parse_mod_style)?; parse_meta_list_after_path(path, input) } } @@ -573,28 +666,13 @@ #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] impl Parse for MetaNameValue { fn parse(input: ParseStream) -> Result { - let path = input.call(parse_meta_path)?; + let path = input.call(Path::parse_mod_style)?; parse_meta_name_value_after_path(path, input) } } - #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] - impl Parse for NestedMeta { - fn parse(input: ParseStream) -> Result { - if input.peek(Lit) && !(input.peek(LitBool) && input.peek2(Token![=])) { - input.parse().map(NestedMeta::Lit) - } else if input.peek(Ident::peek_any) - || input.peek(Token![::]) && input.peek3(Ident::peek_any) - { - input.parse().map(NestedMeta::Meta) - } else { - Err(input.error("expected identifier or literal")) - } - } - } - - pub fn parse_meta_after_path(path: Path, input: ParseStream) -> Result { - if input.peek(token::Paren) { + pub(crate) fn 
parse_meta_after_path(path: Path, input: ParseStream) -> Result { + if input.peek(token::Paren) || input.peek(token::Bracket) || input.peek(token::Brace) { parse_meta_list_after_path(path, input).map(Meta::List) } else if input.peek(Token![=]) { parse_meta_name_value_after_path(path, input).map(Meta::NameValue) @@ -604,21 +682,60 @@ } fn parse_meta_list_after_path(path: Path, input: ParseStream) -> Result { - let content; + let (delimiter, tokens) = mac::parse_delimiter(input)?; Ok(MetaList { path, - paren_token: parenthesized!(content in input), - nested: content.parse_terminated(NestedMeta::parse)?, + delimiter, + tokens, }) } fn parse_meta_name_value_after_path(path: Path, input: ParseStream) -> Result { + let eq_token: Token![=] = input.parse()?; + let ahead = input.fork(); + let lit: Option = ahead.parse()?; + let value = if let (Some(lit), true) = (lit, ahead.is_empty()) { + input.advance_to(&ahead); + Expr::Lit(ExprLit { + attrs: Vec::new(), + lit, + }) + } else if input.peek(Token![#]) && input.peek2(token::Bracket) { + return Err(input.error("unexpected attribute inside of attribute")); + } else { + input.parse()? 
+ }; Ok(MetaNameValue { path, - eq_token: input.parse()?, - lit: input.parse()?, + eq_token, + value, }) } + + pub(super) struct DisplayAttrStyle<'a>(pub &'a AttrStyle); + + impl<'a> Display for DisplayAttrStyle<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(match self.0 { + AttrStyle::Outer => "#", + AttrStyle::Inner(_) => "#!", + }) + } + } + + pub(super) struct DisplayPath<'a>(pub &'a Path); + + impl<'a> Display for DisplayPath<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + for (i, segment) in self.0.segments.iter().enumerate() { + if i > 0 || self.0.leading_colon.is_some() { + formatter.write_str("::")?; + } + write!(formatter, "{}", segment.ident)?; + } + Ok(()) + } + } } #[cfg(feature = "printing")] @@ -635,8 +752,7 @@ b.to_tokens(tokens); } self.bracket_token.surround(tokens, |tokens| { - self.path.to_tokens(tokens); - self.tokens.to_tokens(tokens); + self.meta.to_tokens(tokens); }); } } @@ -645,9 +761,7 @@ impl ToTokens for MetaList { fn to_tokens(&self, tokens: &mut TokenStream) { self.path.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - self.nested.to_tokens(tokens); - }); + self.delimiter.surround(tokens, self.tokens.clone()); } } @@ -656,7 +770,7 @@ fn to_tokens(&self, tokens: &mut TokenStream) { self.path.to_tokens(tokens); self.eq_token.to_tokens(tokens); - self.lit.to_tokens(tokens); + self.value.to_tokens(tokens); } } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/await.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/await.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/await.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/await.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -// See include!("await.rs") in token.rs. -export_token_macro! 
{[await]} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/bigint.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/bigint.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/bigint.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/bigint.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,16 +1,16 @@ use std::ops::{AddAssign, MulAssign}; // For implementing base10_digits() accessor on LitInt. -pub struct BigInt { +pub(crate) struct BigInt { digits: Vec, } impl BigInt { - pub fn new() -> Self { + pub(crate) fn new() -> Self { BigInt { digits: Vec::new() } } - pub fn to_string(&self) -> String { + pub(crate) fn to_string(&self) -> String { let mut repr = String::with_capacity(self.digits.len()); let mut has_nonzero = false; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/buffer.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/buffer.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/buffer.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/buffer.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,7 +1,5 @@ //! A stably addressed token buffer supporting efficient traversal based on a //! cheaply copyable cursor. -//! -//! *This module is available only if Syn is built with the `"parsing"` feature.* // This module is heavily commented as it contains most of the unsafe code in // Syn, and caution should be used when editing it. The public-facing interface @@ -13,6 +11,7 @@ ))] use crate::proc_macro as pm; use crate::Lifetime; +use proc_macro2::extra::DelimSpan; use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; use std::cmp::Ordering; use std::marker::PhantomData; @@ -33,8 +32,6 @@ /// A buffer that can be efficiently traversed multiple times, unlike /// `TokenStream` which requires a deep copy in order to traverse more than /// once. 
-/// -/// *This type is available only if Syn is built with the `"parsing"` feature.* pub struct TokenBuffer { // NOTE: Do not implement clone on this - while the current design could be // cloned, other designs which could be desirable may not be cloneable. @@ -63,13 +60,11 @@ /// Creates a `TokenBuffer` containing all the tokens from the input /// `proc_macro::TokenStream`. - /// - /// *This method is available only if Syn is built with both the `"parsing"` and - /// `"proc-macro"` features.* #[cfg(all( not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), feature = "proc-macro" ))] + #[cfg_attr(doc_cfg, doc(cfg(feature = "proc-macro")))] pub fn new(stream: pm::TokenStream) -> Self { Self::new2(stream.into()) } @@ -101,11 +96,6 @@ /// /// An empty `Cursor` can be created directly, or one may create a `TokenBuffer` /// object and get a cursor to its first token with `begin()`. -/// -/// Two cursors are equal if they have the same location in the same input -/// stream, and have the same scope. -/// -/// *This type is available only if Syn is built with the `"parsing"` feature.* pub struct Cursor<'a> { // The current entry which the `Cursor` is pointing at. ptr: *const Entry, @@ -199,7 +189,7 @@ /// If the cursor is pointing at a `Group` with the given delimiter, returns /// a cursor into that group and one pointing to the next `TokenTree`. - pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, Span, Cursor<'a>)> { + pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, DelimSpan, Cursor<'a>)> { // If we're not trying to enter a none-delimited group, we want to // ignore them. We have to make sure to _not_ ignore them when we want // to enter them, of course. For obvious reasons. 
@@ -209,16 +199,40 @@ if let Entry::Group(group, end_offset) = self.entry() { if group.delimiter() == delim { + let span = group.delim_span(); let end_of_group = unsafe { self.ptr.add(*end_offset) }; let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; - return Some((inside_of_group, group.span(), after_group)); + return Some((inside_of_group, span, after_group)); } } None } + pub(crate) fn any_group(self) -> Option<(Cursor<'a>, Delimiter, DelimSpan, Cursor<'a>)> { + if let Entry::Group(group, end_offset) = self.entry() { + let delimiter = group.delimiter(); + let span = group.delim_span(); + let end_of_group = unsafe { self.ptr.add(*end_offset) }; + let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; + let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; + return Some((inside_of_group, delimiter, span, after_group)); + } + + None + } + + pub(crate) fn any_group_token(self) -> Option<(Group, Cursor<'a>)> { + if let Entry::Group(group, end_offset) = self.entry() { + let end_of_group = unsafe { self.ptr.add(*end_offset) }; + let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; + return Some((group.clone(), after_group)); + } + + None + } + /// If the cursor is pointing at a `Ident`, returns it along with a cursor /// pointing at the next `TokenTree`. pub fn ident(mut self) -> Option<(Ident, Cursor<'a>)> { @@ -313,6 +327,33 @@ } } + /// Returns the `Span` of the token immediately prior to the position of + /// this cursor, or of the current token if there is no previous one. + #[cfg(any(feature = "full", feature = "derive"))] + pub(crate) fn prev_span(mut self) -> Span { + if start_of_buffer(self) < self.ptr { + self.ptr = unsafe { self.ptr.offset(-1) }; + if let Entry::End(_) = self.entry() { + // Locate the matching Group begin token. 
+ let mut depth = 1; + loop { + self.ptr = unsafe { self.ptr.offset(-1) }; + match self.entry() { + Entry::Group(group, _) => { + depth -= 1; + if depth == 0 { + return group.span(); + } + } + Entry::End(_) => depth += 1, + Entry::Literal(_) | Entry::Ident(_) | Entry::Punct(_) => {} + } + } + } + } + self.span() + } + /// Skip over the next token without cloning it. Returns `None` if this /// cursor points to eof. /// @@ -368,11 +409,13 @@ } pub(crate) fn same_buffer(a: Cursor, b: Cursor) -> bool { + start_of_buffer(a) == start_of_buffer(b) +} + +fn start_of_buffer(cursor: Cursor) -> *const Entry { unsafe { - match (&*a.scope, &*b.scope) { - (Entry::End(a_offset), Entry::End(b_offset)) => { - a.scope.offset(*a_offset) == b.scope.offset(*b_offset) - } + match &*cursor.scope { + Entry::End(offset) => cursor.scope.offset(*offset), _ => unreachable!(), } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_keyword.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_keyword.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_keyword.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_keyword.rs 2023-08-15 22:24:19.000000000 +0000 @@ -96,26 +96,28 @@ #[doc(hidden)] #[allow(dead_code, non_snake_case)] - pub fn $ident<__S: $crate::__private::IntoSpans<[$crate::__private::Span; 1]>>( + pub fn $ident<__S: $crate::__private::IntoSpans<$crate::__private::Span>>( span: __S, ) -> $ident { $ident { - span: $crate::__private::IntoSpans::into_spans(span)[0], + span: $crate::__private::IntoSpans::into_spans(span), } } - impl $crate::__private::Default for $ident { - fn default() -> Self { - $ident { - span: $crate::__private::Span::call_site(), + const _: () = { + impl $crate::__private::Default for $ident { + fn default() -> Self { + $ident { + span: $crate::__private::Span::call_site(), + } } } - } - $crate::impl_parse_for_custom_keyword!($ident); - 
$crate::impl_to_tokens_for_custom_keyword!($ident); - $crate::impl_clone_for_custom_keyword!($ident); - $crate::impl_extra_traits_for_custom_keyword!($ident); + $crate::impl_parse_for_custom_keyword!($ident); + $crate::impl_to_tokens_for_custom_keyword!($ident); + $crate::impl_clone_for_custom_keyword!($ident); + $crate::impl_extra_traits_for_custom_keyword!($ident); + }; }; } @@ -129,14 +131,14 @@ impl $crate::token::CustomToken for $ident { fn peek(cursor: $crate::buffer::Cursor) -> $crate::__private::bool { if let $crate::__private::Some((ident, _rest)) = cursor.ident() { - ident == stringify!($ident) + ident == $crate::__private::stringify!($ident) } else { false } } fn display() -> &'static $crate::__private::str { - concat!("`", stringify!($ident), "`") + $crate::__private::concat!("`", $crate::__private::stringify!($ident), "`") } } @@ -144,14 +146,14 @@ fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { input.step(|cursor| { if let $crate::__private::Some((ident, rest)) = cursor.ident() { - if ident == stringify!($ident) { + if ident == $crate::__private::stringify!($ident) { return $crate::__private::Ok(($ident { span: ident.span() }, rest)); } } - $crate::__private::Err(cursor.error(concat!( + $crate::__private::Err(cursor.error($crate::__private::concat!( "expected `", - stringify!($ident), - "`" + $crate::__private::stringify!($ident), + "`", ))) }) } @@ -175,7 +177,7 @@ ($ident:ident) => { impl $crate::__private::ToTokens for $ident { fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { - let ident = $crate::Ident::new(stringify!($ident), self.span); + let ident = $crate::Ident::new($crate::__private::stringify!($ident), self.span); $crate::__private::TokenStreamExt::append(tokens, ident); } } @@ -225,7 +227,11 @@ fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::fmt::Result { $crate::__private::Formatter::write_str( f, - concat!("Keyword [", stringify!($ident), "]"), + 
$crate::__private::concat!( + "Keyword [", + $crate::__private::stringify!($ident), + "]", + ), ) } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_punctuation.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_punctuation.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_punctuation.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/custom_punctuation.rs 2023-08-15 22:24:19.000000000 +0000 @@ -92,16 +92,18 @@ } } - impl $crate::__private::Default for $ident { - fn default() -> Self { - $ident($crate::__private::Span::call_site()) + const _: () = { + impl $crate::__private::Default for $ident { + fn default() -> Self { + $ident($crate::__private::Span::call_site()) + } } - } - $crate::impl_parse_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_to_tokens_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_clone_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_extra_traits_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_parse_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_to_tokens_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_clone_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_extra_traits_for_custom_punctuation!($ident, $($tt)+); + }; }; } @@ -113,18 +115,18 @@ ($ident:ident, $($tt:tt)+) => { impl $crate::token::CustomToken for $ident { fn peek(cursor: $crate::buffer::Cursor) -> bool { - $crate::token::parsing::peek_punct(cursor, $crate::stringify_punct!($($tt)+)) + $crate::__private::peek_punct(cursor, $crate::stringify_punct!($($tt)+)) } fn display() -> &'static $crate::__private::str { - concat!("`", $crate::stringify_punct!($($tt)+), "`") + $crate::__private::concat!("`", $crate::stringify_punct!($($tt)+), "`") } } impl $crate::parse::Parse for $ident { fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { let spans: $crate::custom_punctuation_repr!($($tt)+) = - 
$crate::token::parsing::punct(input, $crate::stringify_punct!($($tt)+))?; + $crate::__private::parse_punct(input, $crate::stringify_punct!($($tt)+))?; Ok($ident(spans)) } } @@ -147,7 +149,7 @@ ($ident:ident, $($tt:tt)+) => { impl $crate::__private::ToTokens for $ident { fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { - $crate::token::printing::punct($crate::stringify_punct!($($tt)+), &self.spans, tokens) + $crate::__private::print_punct($crate::stringify_punct!($($tt)+), &self.spans, tokens) } } }; @@ -194,7 +196,7 @@ ($ident:ident, $($tt:tt)+) => { impl $crate::__private::Debug for $ident { fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::fmt::Result { - $crate::__private::Formatter::write_str(f, stringify!($ident)) + $crate::__private::Formatter::write_str(f, $crate::__private::stringify!($ident)) } } @@ -295,6 +297,6 @@ #[macro_export] macro_rules! stringify_punct { ($($tt:tt)+) => { - concat!($(stringify!($tt)),+) + $crate::__private::concat!($($crate::__private::stringify!($tt)),+) }; } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/data.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/data.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/data.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/data.rs 2023-08-15 22:24:19.000000000 +0000 @@ -3,12 +3,8 @@ ast_struct! { /// An enum variant. - /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct Variant { - /// Attributes tagged on the variant. pub attrs: Vec, /// Name of the variant. @@ -25,9 +21,6 @@ ast_enum_of_structs! { /// Data stored within an enum variant or struct. /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// /// # Syntax tree enum /// /// This type is a [syntax tree enum]. 
@@ -50,9 +43,6 @@ ast_struct! { /// Named fields of a struct or struct variant such as `Point { x: f64, /// y: f64 }`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct FieldsNamed { pub brace_token: token::Brace, @@ -62,9 +52,6 @@ ast_struct! { /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct FieldsUnnamed { pub paren_token: token::Paren, @@ -147,17 +134,14 @@ ast_struct! { /// A field of a struct or enum variant. - /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct Field { - /// Attributes tagged on the field. pub attrs: Vec, - /// Visibility of the field. pub vis: Visibility, + pub mutability: FieldMutability, + /// Name of the field, if any. /// /// Fields of tuple structs have no names. @@ -165,82 +149,14 @@ pub colon_token: Option, - /// Type of the field. pub ty: Type, } } -ast_enum_of_structs! { - /// The visibility level of an item: inherited or `pub` or - /// `pub(restricted)`. - /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: Expr#syntax-tree-enums - #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum Visibility { - /// A public visibility level: `pub`. - Public(VisPublic), - - /// A crate-level visibility: `crate`. - Crate(VisCrate), - - /// A visibility level restricted to some path: `pub(self)` or - /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. 
- Restricted(VisRestricted), - - /// An inherited visibility, which usually means private. - Inherited, - } -} - -ast_struct! { - /// A public visibility level: `pub`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* - #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct VisPublic { - pub pub_token: Token![pub], - } -} - -ast_struct! { - /// A crate-level visibility: `crate`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* - #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct VisCrate { - pub crate_token: Token![crate], - } -} - -ast_struct! { - /// A visibility level restricted to some path: `pub(self)` or - /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* - #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct VisRestricted { - pub pub_token: Token![pub], - pub paren_token: token::Paren, - pub in_token: Option, - pub path: Box, - } -} - #[cfg(feature = "parsing")] -pub mod parsing { +pub(crate) mod parsing { use super::*; use crate::ext::IdentExt; - use crate::parse::discouraged::Speculative; use crate::parse::{Parse, ParseStream, Result}; #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] @@ -278,7 +194,7 @@ let content; Ok(FieldsNamed { brace_token: braced!(content in input), - named: content.parse_terminated(Field::parse_named)?, + named: content.parse_terminated(Field::parse_named, Token![,])?, }) } } @@ -289,7 +205,7 @@ let content; Ok(FieldsUnnamed { paren_token: parenthesized!(content in input), - unnamed: content.parse_terminated(Field::parse_unnamed)?, + unnamed: content.parse_terminated(Field::parse_unnamed, Token![,])?, }) } } @@ -301,6 +217,7 @@ Ok(Field { attrs: input.call(Attribute::parse_outer)?, vis: input.parse()?, + 
mutability: FieldMutability::None, ident: Some(if input.peek(Token![_]) { input.call(Ident::parse_any) } else { @@ -317,100 +234,13 @@ Ok(Field { attrs: input.call(Attribute::parse_outer)?, vis: input.parse()?, + mutability: FieldMutability::None, ident: None, colon_token: None, ty: input.parse()?, }) } } - - #[cfg_attr(doc_cfg, doc(cfg(feature = "parsing")))] - impl Parse for Visibility { - fn parse(input: ParseStream) -> Result { - // Recognize an empty None-delimited group, as produced by a $:vis - // matcher that matched no tokens. - if input.peek(token::Group) { - let ahead = input.fork(); - let group = crate::group::parse_group(&ahead)?; - if group.content.is_empty() { - input.advance_to(&ahead); - return Ok(Visibility::Inherited); - } - } - - if input.peek(Token![pub]) { - Self::parse_pub(input) - } else if input.peek(Token![crate]) { - Self::parse_crate(input) - } else { - Ok(Visibility::Inherited) - } - } - } - - impl Visibility { - fn parse_pub(input: ParseStream) -> Result { - let pub_token = input.parse::()?; - - if input.peek(token::Paren) { - let ahead = input.fork(); - - let content; - let paren_token = parenthesized!(content in ahead); - if content.peek(Token![crate]) - || content.peek(Token![self]) - || content.peek(Token![super]) - { - let path = content.call(Ident::parse_any)?; - - // Ensure there are no additional tokens within `content`. - // Without explicitly checking, we may misinterpret a tuple - // field as a restricted visibility, causing a parse error. - // e.g. `pub (crate::A, crate::B)` (Issue #720). 
- if content.is_empty() { - input.advance_to(&ahead); - return Ok(Visibility::Restricted(VisRestricted { - pub_token, - paren_token, - in_token: None, - path: Box::new(Path::from(path)), - })); - } - } else if content.peek(Token![in]) { - let in_token: Token![in] = content.parse()?; - let path = content.call(Path::parse_mod_style)?; - - input.advance_to(&ahead); - return Ok(Visibility::Restricted(VisRestricted { - pub_token, - paren_token, - in_token: Some(in_token), - path: Box::new(path), - })); - } - } - - Ok(Visibility::Public(VisPublic { pub_token })) - } - - fn parse_crate(input: ParseStream) -> Result { - if input.peek2(Token![::]) { - Ok(Visibility::Inherited) - } else { - Ok(Visibility::Crate(VisCrate { - crate_token: input.parse()?, - })) - } - } - - #[cfg(feature = "full")] - pub(crate) fn is_some(&self) -> bool { - match self { - Visibility::Inherited => false, - _ => true, - } - } - } } #[cfg(feature = "printing")] @@ -463,31 +293,4 @@ self.ty.to_tokens(tokens); } } - - #[cfg_attr(doc_cfg, doc(cfg(feature = "printing")))] - impl ToTokens for VisPublic { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pub_token.to_tokens(tokens); - } - } - - #[cfg_attr(doc_cfg, doc(cfg(feature = "printing")))] - impl ToTokens for VisCrate { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.crate_token.to_tokens(tokens); - } - } - - #[cfg_attr(doc_cfg, doc(cfg(feature = "printing")))] - impl ToTokens for VisRestricted { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pub_token.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - // TODO: If we have a path which is not "self" or "super" or - // "crate", automatically add the "in" token. 
- self.in_token.to_tokens(tokens); - self.path.to_tokens(tokens); - }); - } - } } diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/derive.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/derive.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/derive.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/derive.rs 2023-08-15 22:24:19.000000000 +0000 @@ -3,32 +3,19 @@ ast_struct! { /// Data structure sent to a `proc_macro_derive` macro. - /// - /// *This type is available only if Syn is built with the `"derive"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] pub struct DeriveInput { - /// Attributes tagged on the whole struct or enum. pub attrs: Vec, - - /// Visibility of the struct or enum. pub vis: Visibility, - - /// Name of the struct or enum. pub ident: Ident, - - /// Generics required to complete the definition. pub generics: Generics, - - /// Data within the struct or enum. pub data: Data, } } -ast_enum_of_structs! { +ast_enum! { /// The storage of a struct, enum or union data structure. /// - /// *This type is available only if Syn is built with the `"derive"` feature.* - /// /// # Syntax tree enum /// /// This type is a [syntax tree enum]. @@ -36,24 +23,14 @@ /// [syntax tree enum]: Expr#syntax-tree-enums #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] pub enum Data { - /// A struct input to a `proc_macro_derive` macro. Struct(DataStruct), - - /// An enum input to a `proc_macro_derive` macro. Enum(DataEnum), - - /// An untagged union input to a `proc_macro_derive` macro. Union(DataUnion), } - - do_not_generate_to_tokens } ast_struct! { /// A struct input to a `proc_macro_derive` macro. - /// - /// *This type is available only if Syn is built with the `"derive"` - /// feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] pub struct DataStruct { pub struct_token: Token![struct], @@ -64,9 +41,6 @@ ast_struct! 
{ /// An enum input to a `proc_macro_derive` macro. - /// - /// *This type is available only if Syn is built with the `"derive"` - /// feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] pub struct DataEnum { pub enum_token: Token![enum], @@ -77,9 +51,6 @@ ast_struct! { /// An untagged union input to a `proc_macro_derive` macro. - /// - /// *This type is available only if Syn is built with the `"derive"` - /// feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] pub struct DataUnion { pub union_token: Token![union], @@ -88,7 +59,7 @@ } #[cfg(feature = "parsing")] -pub mod parsing { +pub(crate) mod parsing { use super::*; use crate::parse::{Parse, ParseStream, Result}; @@ -161,7 +132,7 @@ } } - pub fn data_struct( + pub(crate) fn data_struct( input: ParseStream, ) -> Result<(Option, Fields, Option)> { let mut lookahead = input.lookahead1(); @@ -197,7 +168,7 @@ } } - pub fn data_enum( + pub(crate) fn data_enum( input: ParseStream, ) -> Result<( Option, @@ -208,12 +179,12 @@ let content; let brace = braced!(content in input); - let variants = content.parse_terminated(Variant::parse)?; + let variants = content.parse_terminated(Variant::parse, Token![,])?; Ok((where_clause, brace, variants)) } - pub fn data_union(input: ParseStream) -> Result<(Option, FieldsNamed)> { + pub(crate) fn data_union(input: ParseStream) -> Result<(Option, FieldsNamed)> { let where_clause = input.parse()?; let fields = input.parse()?; Ok((where_clause, fields)) diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/discouraged.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/discouraged.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/discouraged.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/discouraged.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,6 +1,7 @@ //! Extensions to the parsing API with niche applicability. 
use super::*; +use proc_macro2::extra::DelimSpan; /// Extensions to the `ParseStream` API to support speculative parsing. pub trait Speculative { @@ -192,3 +193,27 @@ .set(unsafe { mem::transmute::>(fork.cursor()) }); } } + +/// Extensions to the `ParseStream` API to support manipulating invisible +/// delimiters the same as if they were visible. +pub trait AnyDelimiter { + /// Returns the delimiter, the span of the delimiter token, and the nested + /// contents for further parsing. + fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)>; +} + +impl<'a> AnyDelimiter for ParseBuffer<'a> { + fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)> { + self.step(|cursor| { + if let Some((content, delimiter, span, rest)) = cursor.any_group() { + let scope = crate::buffer::close_span_of_group(*cursor); + let nested = crate::parse::advance_step_cursor(cursor, content); + let unexpected = crate::parse::get_unexpected(self); + let content = crate::parse::new_parse_buffer(scope, nested, unexpected); + Ok(((delimiter, span, content), rest)) + } else { + Err(cursor.error("expected any delimiter")) + } + }) + } +} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/error.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/error.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/error.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/error.rs 2023-08-15 22:24:19.000000000 +0000 @@ -7,7 +7,6 @@ #[cfg(feature = "printing")] use quote::ToTokens; use std::fmt::{self, Debug, Display}; -use std::iter::FromIterator; use std::slice; use std::vec; @@ -34,18 +33,34 @@ /// # extern crate proc_macro; /// # /// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, AttributeArgs, ItemFn}; +/// use syn::parse::{Parse, ParseStream, Result}; +/// use syn::{parse_macro_input, ItemFn}; /// /// # const IGNORE: &str = stringify! 
{ /// #[proc_macro_attribute] /// # }; /// pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream { -/// let args = parse_macro_input!(args as AttributeArgs); +/// let args = parse_macro_input!(args as MyAttrArgs); /// let input = parse_macro_input!(input as ItemFn); /// /// /* ... */ /// # TokenStream::new() /// } +/// +/// struct MyAttrArgs { +/// # _k: [(); { stringify! { +/// ... +/// # }; 0 }] +/// } +/// +/// impl Parse for MyAttrArgs { +/// fn parse(input: ParseStream) -> Result { +/// # stringify! { +/// ... +/// # }; +/// # unimplemented!() +/// } +/// } /// ``` /// /// For errors that arise later than the initial parsing stage, the @@ -89,14 +104,21 @@ struct ErrorMessage { // Span is implemented as an index into a thread-local interner to keep the // size small. It is not safe to access from a different thread. We want - // errors to be Send and Sync to play nicely with the Failure crate, so pin - // the span we're given to its original thread and assume it is - // Span::call_site if accessed from any other thread. - start_span: ThreadBound, - end_span: ThreadBound, + // errors to be Send and Sync to play nicely with ecosystem crates for error + // handling, so pin the span we're given to its original thread and assume + // it is Span::call_site if accessed from any other thread. + span: ThreadBound, message: String, } +// Cannot use std::ops::Range because that does not implement Copy, +// whereas ThreadBound requires a Copy impl as a way to ensure no Drop impls +// are involved. 
+struct SpanRange { + start: Span, + end: Span, +} + #[cfg(test)] struct _Test where @@ -139,8 +161,10 @@ fn new(span: Span, message: String) -> Error { Error { messages: vec![ErrorMessage { - start_span: ThreadBound::new(span), - end_span: ThreadBound::new(span), + span: ThreadBound::new(SpanRange { + start: span, + end: span, + }), message, }], } @@ -170,8 +194,7 @@ let end = iter.last().map_or(start, |t| t.span()); Error { messages: vec![ErrorMessage { - start_span: ThreadBound::new(start), - end_span: ThreadBound::new(end), + span: ThreadBound::new(SpanRange { start, end }), message, }], } @@ -184,11 +207,7 @@ /// if called from a different thread than the one on which the `Error` was /// originally created. pub fn span(&self) -> Span { - let start = match self.messages[0].start_span.get() { - Some(span) => *span, - None => return Span::call_site(), - }; - let end = match self.messages[0].end_span.get() { + let SpanRange { start, end } = match self.messages[0].span.get() { Some(span) => *span, None => return Span::call_site(), }; @@ -254,15 +273,34 @@ impl ErrorMessage { fn to_compile_error(&self) -> TokenStream { - let start = self - .start_span - .get() - .cloned() - .unwrap_or_else(Span::call_site); - let end = self.end_span.get().cloned().unwrap_or_else(Span::call_site); + let (start, end) = match self.span.get() { + Some(range) => (range.start, range.end), + None => (Span::call_site(), Span::call_site()), + }; - // compile_error!($message) + // ::core::compile_error!($message) TokenStream::from_iter(vec![ + TokenTree::Punct({ + let mut punct = Punct::new(':', Spacing::Joint); + punct.set_span(start); + punct + }), + TokenTree::Punct({ + let mut punct = Punct::new(':', Spacing::Alone); + punct.set_span(start); + punct + }), + TokenTree::Ident(Ident::new("core", start)), + TokenTree::Punct({ + let mut punct = Punct::new(':', Spacing::Joint); + punct.set_span(start); + punct + }), + TokenTree::Punct({ + let mut punct = Punct::new(':', Spacing::Alone); + 
punct.set_span(start); + punct + }), TokenTree::Ident(Ident::new("compile_error", start)), TokenTree::Punct({ let mut punct = Punct::new('!', Spacing::Alone); @@ -285,7 +323,7 @@ } #[cfg(feature = "parsing")] -pub fn new_at(scope: Span, cursor: Cursor, message: T) -> Error { +pub(crate) fn new_at(scope: Span, cursor: Cursor, message: T) -> Error { if cursor.eof() { Error::new(scope, format!("unexpected end of input, {}", message)) } else { @@ -295,14 +333,13 @@ } #[cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))] -pub fn new2(start: Span, end: Span, message: T) -> Error { +pub(crate) fn new2(start: Span, end: Span, message: T) -> Error { return new2(start, end, message.to_string()); fn new2(start: Span, end: Span, message: String) -> Error { Error { messages: vec![ErrorMessage { - start_span: ThreadBound::new(start), - end_span: ThreadBound::new(end), + span: ThreadBound::new(SpanRange { start, end }), message, }], } @@ -347,20 +384,21 @@ impl Clone for ErrorMessage { fn clone(&self) -> Self { - let start = self - .start_span - .get() - .cloned() - .unwrap_or_else(Span::call_site); - let end = self.end_span.get().cloned().unwrap_or_else(Span::call_site); ErrorMessage { - start_span: ThreadBound::new(start), - end_span: ThreadBound::new(end), + span: self.span.clone(), message: self.message.clone(), } } } +impl Clone for SpanRange { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for SpanRange {} + impl std::error::Error for Error {} impl From for Error { diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/export.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/export.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/export.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/export.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,14 +1,16 @@ pub use std::clone::Clone; pub use std::cmp::{Eq, PartialEq}; +pub use std::concat; pub use std::default::Default; pub 
use std::fmt::{self, Debug, Formatter}; pub use std::hash::{Hash, Hasher}; pub use std::marker::Copy; pub use std::option::Option::{None, Some}; pub use std::result::Result::{Err, Ok}; +pub use std::stringify; #[cfg(feature = "printing")] -pub extern crate quote; +pub use quote; pub use proc_macro2::{Span, TokenStream as TokenStream2}; @@ -17,6 +19,15 @@ pub use crate::span::IntoSpans; +#[cfg(all(feature = "parsing", feature = "printing"))] +pub use crate::parse_quote::parse as parse_quote; + +#[cfg(feature = "parsing")] +pub use crate::token::parsing::{peek_punct, punct as parse_punct}; + +#[cfg(feature = "printing")] +pub use crate::token::printing::punct as print_punct; + #[cfg(all( not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), feature = "proc-macro" diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/expr.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/expr.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/syn/src/expr.rs 2023-02-13 06:00:44.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/syn/src/expr.rs 2023-08-15 22:24:19.000000000 +0000 @@ -1,7 +1,5 @@ use super::*; use crate::punctuated::Punctuated; -#[cfg(feature = "full")] -use crate::reserved::Reserved; use proc_macro2::{Span, TokenStream}; #[cfg(feature = "printing")] use quote::IdentFragment; @@ -87,7 +85,7 @@ /// see names getting repeated in your code, like accessing /// `receiver.receiver` or `pat.pat` or `cond.cond`. #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] - #[cfg_attr(not(syn_no_non_exhaustive), non_exhaustive)] + #[non_exhaustive] pub enum Expr { /// A slice literal expression: `[a, b, c, d]`. Array(ExprArray), @@ -95,24 +93,18 @@ /// An assignment expression: `a = compute()`. Assign(ExprAssign), - /// A compound assignment expression: `counter += 1`. - AssignOp(ExprAssignOp), - /// An async block: `async { ... }`. Async(ExprAsync), /// An await expression: `fut.await`. 
Await(ExprAwait), - /// A binary operation: `a + b`, `a * b`. + /// A binary operation: `a + b`, `a += b`. Binary(ExprBinary), /// A blocked scope: `{ ... }`. Block(ExprBlock), - /// A box expression: `box f`. - Box(ExprBox), - /// A `break`, with an optional label to break and an optional /// expression. Break(ExprBreak), @@ -126,6 +118,9 @@ /// A closure expression: `|a, b| a + b`. Closure(ExprClosure), + /// A const block: `const { ... }`. + Const(ExprConst), + /// A `continue`, with an optional label. Continue(ExprContinue), @@ -153,6 +148,9 @@ /// A square bracketed indexing expression: `vector[2]`. Index(ExprIndex), + /// The inferred value of a const generic argument, denoted `_`. + Infer(ExprInfer), + /// A `let` guard: `let Some(x) = opt`. Let(ExprLet), @@ -207,9 +205,6 @@ /// A tuple expression: `(a, b, c, d)`. Tuple(ExprTuple), - /// A type ascription expression: `foo: f64`. - Type(ExprType), - /// A unary operation: `!x`, `*x`. Unary(ExprUnary), @@ -225,8 +220,6 @@ /// A yield expression: `yield expr`. Yield(ExprYield), - // Not public API. - // // For testing exhaustiveness in downstream code, use the following idiom: // // match expr { @@ -243,16 +236,11 @@ // a variant. You will be notified by a test failure when a variant is // added, so that you can add code to handle it, but your library will // continue to compile and work for downstream users in the interim. - #[cfg(syn_no_non_exhaustive)] - #[doc(hidden)] - __NonExhaustive, } } ast_struct! { /// A slice literal expression: `[a, b, c, d]`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprArray #full { pub attrs: Vec, @@ -263,8 +251,6 @@ ast_struct! { /// An assignment expression: `a = compute()`. 
- /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprAssign #full { pub attrs: Vec, @@ -275,22 +261,7 @@ } ast_struct! { - /// A compound assignment expression: `counter += 1`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* - #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] - pub struct ExprAssignOp #full { - pub attrs: Vec, - pub left: Box, - pub op: BinOp, - pub right: Box, - } -} - -ast_struct! { /// An async block: `async { ... }`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprAsync #full { pub attrs: Vec, @@ -302,22 +273,17 @@ ast_struct! { /// An await expression: `fut.await`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprAwait #full { pub attrs: Vec, pub base: Box, pub dot_token: Token![.], - pub await_token: token::Await, + pub await_token: Token![await], } } ast_struct! { - /// A binary operation: `a + b`, `a * b`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* + /// A binary operation: `a + b`, `a += b`. #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct ExprBinary { pub attrs: Vec, @@ -329,8 +295,6 @@ ast_struct! { /// A blocked scope: `{ ... }`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprBlock #full { pub attrs: Vec, @@ -340,22 +304,8 @@ } ast_struct! { - /// A box expression: `box f`. 
- /// - /// *This type is available only if Syn is built with the `"full"` feature.* - #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] - pub struct ExprBox #full { - pub attrs: Vec, - pub box_token: Token![box], - pub expr: Box, - } -} - -ast_struct! { /// A `break`, with an optional label to break and an optional /// expression. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprBreak #full { pub attrs: Vec, @@ -367,9 +317,6 @@ ast_struct! { /// A function call expression: `invoke(a, b)`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct ExprCall { pub attrs: Vec, @@ -381,9 +328,6 @@ ast_struct! { /// A cast expression: `foo as f64`. - /// - /// *This type is available only if Syn is built with the `"derive"` or - /// `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct ExprCast { pub attrs: Vec, @@ -395,11 +339,11 @@ ast_struct! { /// A closure expression: `|a, b| a + b`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprClosure #full { pub attrs: Vec, + pub lifetimes: Option, + pub constness: Option, pub movability: Option, pub asyncness: Option, pub capture: Option, @@ -412,9 +356,16 @@ } ast_struct! { + /// A const block: `const { ... }`. + pub struct ExprConst #full { + pub attrs: Vec, + pub const_token: Token![const], + pub block: Block, + } +} + +ast_struct! { /// A `continue`, with an optional label. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprContinue #full { pub attrs: Vec, @@ -426,8 +377,6 @@ ast_struct! 
{ /// Access of a named struct field (`obj.k`) or unnamed tuple struct /// field (`obj.0`). - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(any(feature = "full", feature = "derive"))))] pub struct ExprField { pub attrs: Vec, @@ -439,14 +388,12 @@ ast_struct! { /// A for loop: `for pat in expr { ... }`. - /// - /// *This type is available only if Syn is built with the `"full"` feature.* #[cfg_attr(doc_cfg, doc(cfg(feature = "full")))] pub struct ExprForLoop #full { pub attrs: Vec, pub label: Option

( - path: P - ) -> Result - where P: AsRef<::std::ffi::OsStr> { - let library = ::libloading::Library::new(path)?; - Self::from_library(library) - } - - pub unsafe fn from_library( - library: L - ) -> Result - where L: Into<::libloading::Library> { - let __library = library.into(); - #( #constructor_inits )* - Ok(#lib_ident { - __library, - #( #init_fields ),* - }) - } - - #( #struct_implementation )* - } - } - } - - pub fn push( - &mut self, - ident: Ident, - abi: Abi, - is_variadic: bool, - is_required: bool, - args: Vec, - args_identifiers: Vec, - ret: proc_macro2::TokenStream, - ret_ty: proc_macro2::TokenStream, - ) { - if !is_variadic { - assert_eq!(args.len(), args_identifiers.len()); - } - - let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret }; - let member = if is_required { - signature - } else { - quote! { Result<#signature, ::libloading::Error> } - }; - - self.struct_members.push(quote! { - pub #ident: #member, - }); - - // N.B: If the signature was required, it won't be wrapped in a Result<...> - // and we can simply call it directly. - let fn_ = if is_required { - quote! { self.#ident } - } else { - quote! { self.#ident.as_ref().expect("Expected function, got error.") } - }; - let call_body = quote! { - (#fn_)(#( #args_identifiers ),*) - }; - - // We can't implement variadic functions from C easily, so we allow to - // access the function pointer so that the user can call it just fine. - if !is_variadic { - self.struct_implementation.push(quote! { - pub unsafe fn #ident ( &self, #( #args ),* ) -> #ret_ty { - #call_body - } - }); - } - - // N.B: Unwrap the signature upon construction if it is required to be resolved. - let ident_str = codegen::helpers::ast_ty::cstr_expr(ident.to_string()); - self.constructor_inits.push(if is_required { - quote! { - let #ident = __library.get(#ident_str).map(|sym| *sym)?; - } - } else { - quote! { - let #ident = __library.get(#ident_str).map(|sym| *sym); - } - }); - - self.init_fields.push(quote! 
{ - #ident - }); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/error.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/error.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/error.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/error.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -use std::error; -use std::fmt; - -/// Errors that can occur during code generation. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Error { - /// Tried to generate an opaque blob for a type that did not have a layout. - NoLayoutForOpaqueBlob, - - /// Tried to instantiate an opaque template definition, or a template - /// definition that is too difficult for us to understand (like a partial - /// template specialization). - InstantiationOfOpaqueType, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - Error::NoLayoutForOpaqueBlob => { - "Tried to generate an opaque blob, but had no layout" - } - Error::InstantiationOfOpaqueType => { - "Instantiation of opaque template type or partial template \ - specialization" - } - }) - } -} - -impl error::Error for Error {} - -/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`. -pub type Result = ::std::result::Result; diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/helpers.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/helpers.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/helpers.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/helpers.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,299 +0,0 @@ -//! Helpers for code generation that don't need macro expansion. 
- -use crate::ir::context::BindgenContext; -use crate::ir::layout::Layout; -use proc_macro2::{Ident, Span, TokenStream}; -use quote::TokenStreamExt; - -pub mod attributes { - use proc_macro2::{Ident, Span, TokenStream}; - use std::str::FromStr; - - pub fn repr(which: &str) -> TokenStream { - let which = Ident::new(which, Span::call_site()); - quote! { - #[repr( #which )] - } - } - - pub fn repr_list(which_ones: &[&str]) -> TokenStream { - let which_ones = which_ones - .iter() - .cloned() - .map(|one| TokenStream::from_str(one).expect("repr to be valid")); - quote! { - #[repr( #( #which_ones ),* )] - } - } - - pub fn derives(which_ones: &[&str]) -> TokenStream { - let which_ones = which_ones - .iter() - .cloned() - .map(|one| Ident::new(one, Span::call_site())); - quote! { - #[derive( #( #which_ones ),* )] - } - } - - pub fn inline() -> TokenStream { - quote! { - #[inline] - } - } - - pub fn must_use() -> TokenStream { - quote! { - #[must_use] - } - } - - pub fn non_exhaustive() -> TokenStream { - quote! { - #[non_exhaustive] - } - } - - pub fn doc(comment: String) -> TokenStream { - // NOTE(emilio): By this point comments are already preprocessed and in - // `///` form. Quote turns them into `#[doc]` comments, but oh well. - TokenStream::from_str(&comment).unwrap() - } - - pub fn link_name(name: &str) -> TokenStream { - // LLVM mangles the name by default but it's already mangled. - // Prefixing the name with \u{1} should tell LLVM to not mangle it. - let name = format!("\u{1}{}", name); - quote! { - #[link_name = #name] - } - } -} - -/// Generates a proper type for a field or type with a given `Layout`, that is, -/// a type with the correct size and alignment restrictions. -pub fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream { - let opaque = layout.opaque(); - - // FIXME(emilio, #412): We fall back to byte alignment, but there are - // some things that legitimately are more than 8-byte aligned. 
- // - // Eventually we should be able to `unwrap` here, but... - let ty_name = match opaque.known_rust_type_for_array(ctx) { - Some(ty) => ty, - None => { - warn!("Found unknown alignment on code generation!"); - "u8" - } - }; - - let ty_name = Ident::new(ty_name, Span::call_site()); - - let data_len = opaque.array_size(ctx).unwrap_or(layout.size); - - if data_len == 1 { - quote! { - #ty_name - } - } else { - quote! { - [ #ty_name ; #data_len ] - } - } -} - -/// Integer type of the same size as the given `Layout`. -pub fn integer_type( - ctx: &BindgenContext, - layout: Layout, -) -> Option { - let name = Layout::known_type_for_size(ctx, layout.size)?; - let name = Ident::new(name, Span::call_site()); - Some(quote! { #name }) -} - -/// Generates a bitfield allocation unit type for a type with the given `Layout`. -pub fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> TokenStream { - let mut tokens = quote! {}; - - if ctx.options().enable_cxx_namespaces { - tokens.append_all(quote! { root:: }); - } - - let size = layout.size; - tokens.append_all(quote! { - __BindgenBitfieldUnit<[u8; #size]> - }); - - tokens -} - -pub mod ast_ty { - use crate::ir::context::BindgenContext; - use crate::ir::function::FunctionSig; - use crate::ir::layout::Layout; - use crate::ir::ty::FloatKind; - use proc_macro2::{self, TokenStream}; - use std::str::FromStr; - - pub fn c_void(ctx: &BindgenContext) -> TokenStream { - // ctypes_prefix takes precedence - match ctx.options().ctypes_prefix { - Some(ref prefix) => { - let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); - quote! { - #prefix::c_void - } - } - None => { - if ctx.options().use_core && - ctx.options().rust_features.core_ffi_c_void - { - quote! { ::core::ffi::c_void } - } else { - quote! 
{ ::std::os::raw::c_void } - } - } - } - } - - pub fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream { - let ident = ctx.rust_ident_raw(name); - match ctx.options().ctypes_prefix { - Some(ref prefix) => { - let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); - quote! { - #prefix::#ident - } - } - None => quote! { - ::std::os::raw::#ident - }, - } - } - - pub fn float_kind_rust_type( - ctx: &BindgenContext, - fk: FloatKind, - layout: Option, - ) -> TokenStream { - // TODO: we probably should take the type layout into account more - // often? - // - // Also, maybe this one shouldn't be the default? - match (fk, ctx.options().convert_floats) { - (FloatKind::Float, true) => quote! { f32 }, - (FloatKind::Double, true) => quote! { f64 }, - (FloatKind::Float, false) => raw_type(ctx, "c_float"), - (FloatKind::Double, false) => raw_type(ctx, "c_double"), - (FloatKind::LongDouble, _) => { - match layout { - Some(layout) => { - match layout.size { - 4 => quote! { f32 }, - 8 => quote! { f64 }, - // TODO(emilio): If rust ever gains f128 we should - // use it here and below. - _ => super::integer_type(ctx, layout) - .unwrap_or(quote! { f64 }), - } - } - None => { - debug_assert!( - false, - "How didn't we know the layout for a primitive type?" - ); - quote! { f64 } - } - } - } - (FloatKind::Float128, _) => { - if ctx.options().rust_features.i128_and_u128 { - quote! { u128 } - } else { - quote! { [u64; 2] } - } - } - } - } - - pub fn int_expr(val: i64) -> TokenStream { - // Don't use quote! { #val } because that adds the type suffix. - let val = proc_macro2::Literal::i64_unsuffixed(val); - quote!(#val) - } - - pub fn uint_expr(val: u64) -> TokenStream { - // Don't use quote! { #val } because that adds the type suffix. - let val = proc_macro2::Literal::u64_unsuffixed(val); - quote!(#val) - } - - pub fn byte_array_expr(bytes: &[u8]) -> TokenStream { - let mut bytes: Vec<_> = bytes.to_vec(); - bytes.push(0); - quote! 
{ [ #(#bytes),* ] } - } - - pub fn cstr_expr(mut string: String) -> TokenStream { - string.push('\0'); - let b = proc_macro2::Literal::byte_string(string.as_bytes()); - quote! { - #b - } - } - - pub fn float_expr(ctx: &BindgenContext, f: f64) -> Result { - if f.is_finite() { - let val = proc_macro2::Literal::f64_unsuffixed(f); - - return Ok(quote!(#val)); - } - - let prefix = ctx.trait_prefix(); - - if f.is_nan() { - return Ok(quote! { - ::#prefix::f64::NAN - }); - } - - if f.is_infinite() { - return Ok(if f.is_sign_positive() { - quote! { - ::#prefix::f64::INFINITY - } - } else { - quote! { - ::#prefix::f64::NEG_INFINITY - } - }); - } - - warn!("Unknown non-finite float number: {:?}", f); - Err(()) - } - - pub fn arguments_from_signature( - signature: &FunctionSig, - ctx: &BindgenContext, - ) -> Vec { - let mut unnamed_arguments = 0; - signature - .argument_types() - .iter() - .map(|&(ref name, _ty)| match *name { - Some(ref name) => { - let name = ctx.rust_ident(name); - quote! { #name } - } - None => { - unnamed_arguments += 1; - let name = - ctx.rust_ident(format!("arg{}", unnamed_arguments)); - quote! 
{ #name } - } - }) - .collect() - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_debug.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_debug.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_debug.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_debug.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,245 +0,0 @@ -use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods}; -use crate::ir::context::BindgenContext; -use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName}; -use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; - -pub fn gen_debug_impl( - ctx: &BindgenContext, - fields: &[Field], - item: &Item, - kind: CompKind, -) -> proc_macro2::TokenStream { - let struct_name = item.canonical_name(ctx); - let mut format_string = format!("{} {{{{ ", struct_name); - let mut tokens = vec![]; - - if item.is_opaque(ctx, &()) { - format_string.push_str("opaque"); - } else { - match kind { - CompKind::Union => { - format_string.push_str("union"); - } - CompKind::Struct => { - let processed_fields = fields.iter().filter_map(|f| match f { - Field::DataMember(ref fd) => fd.impl_debug(ctx, ()), - Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()), - }); - - for (i, (fstring, toks)) in processed_fields.enumerate() { - if i > 0 { - format_string.push_str(", "); - } - tokens.extend(toks); - format_string.push_str(&fstring); - } - } - } - } - - format_string.push_str(" }}"); - tokens.insert(0, quote! { #format_string }); - - let prefix = ctx.trait_prefix(); - - quote! { - fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result { - write!(f, #( #tokens ),*) - } - } -} - -/// A trait for the things which we can codegen tokens that contribute towards a -/// generated `impl Debug`. 
-pub trait ImplDebug<'a> { - /// Any extra parameter required by this a particular `ImplDebug` implementation. - type Extra; - - /// Generate a format string snippet to be included in the larger `impl Debug` - /// format string, and the code to get the format string's interpolation values. - fn impl_debug( - &self, - ctx: &BindgenContext, - extra: Self::Extra, - ) -> Option<(String, Vec)>; -} - -impl<'a> ImplDebug<'a> for FieldData { - type Extra = (); - - fn impl_debug( - &self, - ctx: &BindgenContext, - _: Self::Extra, - ) -> Option<(String, Vec)> { - if let Some(name) = self.name() { - ctx.resolve_item(self.ty()).impl_debug(ctx, name) - } else { - None - } - } -} - -impl<'a> ImplDebug<'a> for BitfieldUnit { - type Extra = (); - - fn impl_debug( - &self, - ctx: &BindgenContext, - _: Self::Extra, - ) -> Option<(String, Vec)> { - let mut format_string = String::new(); - let mut tokens = vec![]; - for (i, bitfield) in self.bitfields().iter().enumerate() { - if i > 0 { - format_string.push_str(", "); - } - - if let Some(bitfield_name) = bitfield.name() { - format_string.push_str(&format!("{} : {{:?}}", bitfield_name)); - let getter_name = bitfield.getter_name(); - let name_ident = ctx.rust_ident_raw(getter_name); - tokens.push(quote! { - self.#name_ident () - }); - } - } - - Some((format_string, tokens)) - } -} - -impl<'a> ImplDebug<'a> for Item { - type Extra = &'a str; - - fn impl_debug( - &self, - ctx: &BindgenContext, - name: &str, - ) -> Option<(String, Vec)> { - let name_ident = ctx.rust_ident(name); - - // We don't know if blocklisted items `impl Debug` or not, so we can't - // add them to the format string we're building up. - if !ctx.allowlisted_items().contains(&self.id()) { - return None; - } - - let ty = match self.as_type() { - Some(ty) => ty, - None => { - return None; - } - }; - - fn debug_print( - name: &str, - name_ident: proc_macro2::TokenStream, - ) -> Option<(String, Vec)> { - Some(( - format!("{}: {{:?}}", name), - vec![quote! 
{ - self.#name_ident - }], - )) - } - - match *ty.kind() { - // Handle the simple cases. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::Comp(..) | - TypeKind::ObjCSel => debug_print(name, quote! { #name_ident }), - - TypeKind::TemplateInstantiation(ref inst) => { - if inst.is_opaque(ctx, self) { - Some((format!("{}: opaque", name), vec![])) - } else { - debug_print(name, quote! { #name_ident }) - } - } - - // The generic is not required to implement Debug, so we can not debug print that type - TypeKind::TypeParam => { - Some((format!("{}: Non-debuggable generic", name), vec![])) - } - - TypeKind::Array(_, len) => { - // Generics are not required to implement Debug - if self.has_type_param_in_array(ctx) { - Some(( - format!("{}: Array with length {}", name, len), - vec![], - )) - } else if len < RUST_DERIVE_IN_ARRAY_LIMIT || - ctx.options().rust_features().larger_arrays - { - // The simple case - debug_print(name, quote! { #name_ident }) - } else if ctx.options().use_core { - // There is no String in core; reducing field visibility to avoid breaking - // no_std setups. - Some((format!("{}: [...]", name), vec![])) - } else { - // Let's implement our own print function - Some(( - format!("{}: [{{}}]", name), - vec![quote! { - self.#name_ident - .iter() - .enumerate() - .map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v)) - .collect::() - }], - )) - } - } - TypeKind::Vector(_, len) => { - if ctx.options().use_core { - // There is no format! in core; reducing field visibility to avoid breaking - // no_std setups. - Some((format!("{}(...)", name), vec![])) - } else { - let self_ids = 0..len; - Some(( - format!("{}({{}})", name), - vec![quote! 
{ - #(format!("{:?}", self.#self_ids)),* - }], - )) - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - // We follow the aliases - ctx.resolve_item(t).impl_debug(ctx, name) - } - - TypeKind::Pointer(inner) => { - let inner_type = ctx.resolve_type(inner).canonical_type(ctx); - match *inner_type.kind() { - TypeKind::Function(ref sig) - if !sig.function_pointers_can_derive() => - { - Some((format!("{}: FunctionPointer", name), vec![])) - } - _ => debug_print(name, quote! { #name_ident }), - } - } - - TypeKind::Opaque => None, - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_partialeq.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_partialeq.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_partialeq.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/impl_partialeq.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods}; -use crate::ir::context::BindgenContext; -use crate::ir::item::{IsOpaque, Item}; -use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; - -/// Generate a manual implementation of `PartialEq` trait for the -/// specified compound type. -pub fn gen_partialeq_impl( - ctx: &BindgenContext, - comp_info: &CompInfo, - item: &Item, - ty_for_impl: &proc_macro2::TokenStream, -) -> Option { - let mut tokens = vec![]; - - if item.is_opaque(ctx, &()) { - tokens.push(quote! { - &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..] - }); - } else if comp_info.kind() == CompKind::Union { - assert!(!ctx.options().rust_features().untagged_union); - tokens.push(quote! { - &self.bindgen_union_field[..] == &other.bindgen_union_field[..] 
- }); - } else { - for base in comp_info.base_members().iter() { - if !base.requires_storage(ctx) { - continue; - } - - let ty_item = ctx.resolve_item(base.ty); - let field_name = &base.field_name; - - if ty_item.is_opaque(ctx, &()) { - let field_name = ctx.rust_ident(field_name); - tokens.push(quote! { - &self. #field_name [..] == &other. #field_name [..] - }); - } else { - tokens.push(gen_field(ctx, ty_item, field_name)); - } - } - - for field in comp_info.fields() { - match *field { - Field::DataMember(ref fd) => { - let ty_item = ctx.resolve_item(fd.ty()); - let name = fd.name().unwrap(); - tokens.push(gen_field(ctx, ty_item, name)); - } - Field::Bitfields(ref bu) => { - for bitfield in bu.bitfields() { - if bitfield.name().is_some() { - let getter_name = bitfield.getter_name(); - let name_ident = ctx.rust_ident_raw(getter_name); - tokens.push(quote! { - self.#name_ident () == other.#name_ident () - }); - } - } - } - } - } - } - - Some(quote! { - fn eq(&self, other: & #ty_for_impl) -> bool { - #( #tokens )&&* - } - }) -} - -fn gen_field( - ctx: &BindgenContext, - ty_item: &Item, - name: &str, -) -> proc_macro2::TokenStream { - fn quote_equals( - name_ident: proc_macro2::Ident, - ) -> proc_macro2::TokenStream { - quote! { self.#name_ident == other.#name_ident } - } - - let name_ident = ctx.rust_ident(name); - let ty = ty_item.expect_type(); - - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Complex(..) | - TypeKind::Float(..) | - TypeKind::Enum(..) | - TypeKind::TypeParam | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::Reference(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::Comp(..) | - TypeKind::Pointer(_) | - TypeKind::Function(..) | - TypeKind::Opaque => quote_equals(name_ident), - - TypeKind::TemplateInstantiation(ref inst) => { - if inst.is_opaque(ctx, ty_item) { - quote! { - &self. #name_ident [..] == &other. #name_ident [..] 
- } - } else { - quote_equals(name_ident) - } - } - - TypeKind::Array(_, len) => { - if len <= RUST_DERIVE_IN_ARRAY_LIMIT || - ctx.options().rust_features().larger_arrays - { - quote_equals(name_ident) - } else { - quote! { - &self. #name_ident [..] == &other. #name_ident [..] - } - } - } - TypeKind::Vector(_, len) => { - let self_ids = 0..len; - let other_ids = 0..len; - quote! { - #(self.#self_ids == other.#other_ids &&)* true - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - let inner_item = ctx.resolve_item(t); - gen_field(ctx, inner_item, name) - } - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/mod.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,4835 +0,0 @@ -mod dyngen; -mod error; -mod helpers; -mod impl_debug; -mod impl_partialeq; -pub mod struct_layout; - -#[cfg(test)] -#[allow(warnings)] -pub(crate) mod bitfield_unit; -#[cfg(all(test, target_endian = "little"))] -mod bitfield_unit_tests; - -use self::dyngen::DynamicItems; -use self::helpers::attributes; -use self::struct_layout::StructLayoutTracker; - -use super::BindgenOptions; - -use crate::ir::analysis::{HasVtable, Sizedness}; -use crate::ir::annotations::FieldAccessorKind; -use crate::ir::comment; -use crate::ir::comp::{ - Base, Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, - FieldMethods, Method, MethodKind, -}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::derive::{ - CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use crate::ir::dot; -use crate::ir::enum_ty::{Enum, EnumVariant, 
EnumVariantValue}; -use crate::ir::function::{Abi, Function, FunctionKind, FunctionSig, Linkage}; -use crate::ir::int::IntKind; -use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; -use crate::ir::item_kind::ItemKind; -use crate::ir::layout::Layout; -use crate::ir::module::Module; -use crate::ir::objc::{ObjCInterface, ObjCMethod}; -use crate::ir::template::{ - AsTemplateParam, TemplateInstantiation, TemplateParameters, -}; -use crate::ir::ty::{Type, TypeKind}; -use crate::ir::var::Var; - -use proc_macro2::{self, Ident, Span}; -use quote::TokenStreamExt; - -use crate::{Entry, HashMap, HashSet}; -use std::borrow::Cow; -use std::cell::Cell; -use std::collections::VecDeque; -use std::fmt::Write; -use std::iter; -use std::ops; -use std::str::FromStr; - -// Name of type defined in constified enum module -pub static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type"; - -fn top_level_path( - ctx: &BindgenContext, - item: &Item, -) -> Vec { - let mut path = vec![quote! { self }]; - - if ctx.options().enable_cxx_namespaces { - for _ in 0..item.codegen_depth(ctx) { - path.push(quote! { super }); - } - } - - path -} - -fn root_import( - ctx: &BindgenContext, - module: &Item, -) -> proc_macro2::TokenStream { - assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); - assert!(module.is_module()); - - let mut path = top_level_path(ctx, module); - - let root = ctx.root_module().canonical_name(ctx); - let root_ident = ctx.rust_ident(&root); - path.push(quote! { #root_ident }); - - let mut tokens = quote! {}; - tokens.append_separated(path, quote!(::)); - - quote! { - #[allow(unused_imports)] - use #tokens ; - } -} - -bitflags! 
{ - struct DerivableTraits: u16 { - const DEBUG = 1 << 0; - const DEFAULT = 1 << 1; - const COPY = 1 << 2; - const CLONE = 1 << 3; - const HASH = 1 << 4; - const PARTIAL_ORD = 1 << 5; - const ORD = 1 << 6; - const PARTIAL_EQ = 1 << 7; - const EQ = 1 << 8; - } -} - -fn derives_of_item(item: &Item, ctx: &BindgenContext) -> DerivableTraits { - let mut derivable_traits = DerivableTraits::empty(); - - if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { - derivable_traits |= DerivableTraits::DEBUG; - } - - if item.can_derive_default(ctx) && !item.annotations().disallow_default() { - derivable_traits |= DerivableTraits::DEFAULT; - } - - let all_template_params = item.all_template_params(ctx); - - if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { - derivable_traits |= DerivableTraits::COPY; - - if ctx.options().rust_features().builtin_clone_impls || - !all_template_params.is_empty() - { - // FIXME: This requires extra logic if you have a big array in a - // templated struct. The reason for this is that the magic: - // fn clone(&self) -> Self { *self } - // doesn't work for templates. - // - // It's not hard to fix though. 
- derivable_traits |= DerivableTraits::CLONE; - } - } - - if item.can_derive_hash(ctx) { - derivable_traits |= DerivableTraits::HASH; - } - - if item.can_derive_partialord(ctx) { - derivable_traits |= DerivableTraits::PARTIAL_ORD; - } - - if item.can_derive_ord(ctx) { - derivable_traits |= DerivableTraits::ORD; - } - - if item.can_derive_partialeq(ctx) { - derivable_traits |= DerivableTraits::PARTIAL_EQ; - } - - if item.can_derive_eq(ctx) { - derivable_traits |= DerivableTraits::EQ; - } - - derivable_traits -} - -impl From for Vec<&'static str> { - fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { - [ - (DerivableTraits::DEBUG, "Debug"), - (DerivableTraits::DEFAULT, "Default"), - (DerivableTraits::COPY, "Copy"), - (DerivableTraits::CLONE, "Clone"), - (DerivableTraits::HASH, "Hash"), - (DerivableTraits::PARTIAL_ORD, "PartialOrd"), - (DerivableTraits::ORD, "Ord"), - (DerivableTraits::PARTIAL_EQ, "PartialEq"), - (DerivableTraits::EQ, "Eq"), - ] - .iter() - .filter_map(|&(flag, derive)| { - Some(derive).filter(|_| derivable_traits.contains(flag)) - }) - .collect() - } -} - -struct CodegenResult<'a> { - items: Vec, - dynamic_items: DynamicItems, - - /// A monotonic counter used to add stable unique id's to stuff that doesn't - /// need to be referenced by anything. - codegen_id: &'a Cell, - - /// Whether a bindgen union has been generated at least once. - saw_bindgen_union: bool, - - /// Whether an incomplete array has been generated at least once. - saw_incomplete_array: bool, - - /// Whether Objective C types have been seen at least once. - saw_objc: bool, - - /// Whether Apple block types have been seen at least once. - saw_block: bool, - - /// Whether a bitfield allocation unit has been seen at least once. 
- saw_bitfield_unit: bool, - - items_seen: HashSet, - /// The set of generated function/var names, needed because in C/C++ is - /// legal to do something like: - /// - /// ```c++ - /// extern "C" { - /// void foo(); - /// extern int bar; - /// } - /// - /// extern "C" { - /// void foo(); - /// extern int bar; - /// } - /// ``` - /// - /// Being these two different declarations. - functions_seen: HashSet, - vars_seen: HashSet, - - /// Used for making bindings to overloaded functions. Maps from a canonical - /// function name to the number of overloads we have already codegen'd for - /// that name. This lets us give each overload a unique suffix. - overload_counters: HashMap, -} - -impl<'a> CodegenResult<'a> { - fn new(codegen_id: &'a Cell) -> Self { - CodegenResult { - items: vec![], - dynamic_items: DynamicItems::new(), - saw_bindgen_union: false, - saw_incomplete_array: false, - saw_objc: false, - saw_block: false, - saw_bitfield_unit: false, - codegen_id, - items_seen: Default::default(), - functions_seen: Default::default(), - vars_seen: Default::default(), - overload_counters: Default::default(), - } - } - - fn dynamic_items(&mut self) -> &mut DynamicItems { - &mut self.dynamic_items - } - - fn saw_bindgen_union(&mut self) { - self.saw_bindgen_union = true; - } - - fn saw_incomplete_array(&mut self) { - self.saw_incomplete_array = true; - } - - fn saw_objc(&mut self) { - self.saw_objc = true; - } - - fn saw_block(&mut self) { - self.saw_block = true; - } - - fn saw_bitfield_unit(&mut self) { - self.saw_bitfield_unit = true; - } - - fn seen>(&self, item: Id) -> bool { - self.items_seen.contains(&item.into()) - } - - fn set_seen>(&mut self, item: Id) { - self.items_seen.insert(item.into()); - } - - fn seen_function(&self, name: &str) -> bool { - self.functions_seen.contains(name) - } - - fn saw_function(&mut self, name: &str) { - self.functions_seen.insert(name.into()); - } - - /// Get the overload number for the given function name. 
Increments the - /// counter internally so the next time we ask for the overload for this - /// name, we get the incremented value, and so on. - fn overload_number(&mut self, name: &str) -> u32 { - let counter = self.overload_counters.entry(name.into()).or_insert(0); - let number = *counter; - *counter += 1; - number - } - - fn seen_var(&self, name: &str) -> bool { - self.vars_seen.contains(name) - } - - fn saw_var(&mut self, name: &str) { - self.vars_seen.insert(name.into()); - } - - fn inner(&mut self, cb: F) -> Vec - where - F: FnOnce(&mut Self), - { - let mut new = Self::new(self.codegen_id); - - cb(&mut new); - - self.saw_incomplete_array |= new.saw_incomplete_array; - self.saw_objc |= new.saw_objc; - self.saw_block |= new.saw_block; - self.saw_bitfield_unit |= new.saw_bitfield_unit; - self.saw_bindgen_union |= new.saw_bindgen_union; - - new.items - } -} - -impl<'a> ops::Deref for CodegenResult<'a> { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.items - } -} - -impl<'a> ops::DerefMut for CodegenResult<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.items - } -} - -/// A trait to convert a rust type into a pointer, optionally const, to the same -/// type. -trait ToPtr { - fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream; -} - -impl ToPtr for proc_macro2::TokenStream { - fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream { - if is_const { - quote! { *const #self } - } else { - quote! { *mut #self } - } - } -} - -/// An extension trait for `proc_macro2::TokenStream` that lets us append any implicit -/// template parameters that exist for some type, if necessary. 
-trait AppendImplicitTemplateParams { - fn append_implicit_template_params( - &mut self, - ctx: &BindgenContext, - item: &Item, - ); -} - -impl AppendImplicitTemplateParams for proc_macro2::TokenStream { - fn append_implicit_template_params( - &mut self, - ctx: &BindgenContext, - item: &Item, - ) { - let item = item.id().into_resolver().through_type_refs().resolve(ctx); - - match *item.expect_type().kind() { - TypeKind::UnresolvedTypeRef(..) => { - unreachable!("already resolved unresolved type refs") - } - TypeKind::ResolvedTypeRef(..) => { - unreachable!("we resolved item through type refs") - } - - // None of these types ever have implicit template parameters. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::TemplateInstantiation(..) => return, - _ => {} - } - - let params: Vec<_> = item - .used_template_params(ctx) - .iter() - .map(|p| { - p.try_to_rust_ty(ctx, &()) - .expect("template params cannot fail to be a rust type") - }) - .collect(); - if !params.is_empty() { - self.append_all(quote! { - < #( #params ),* > - }); - } - } -} - -trait CodeGenerator { - /// Extra information from the caller. - type Extra; - - /// Extra information returned to the caller. 
- type Return; - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - extra: &Self::Extra, - ) -> Self::Return; -} - -impl Item { - fn process_before_codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult, - ) -> bool { - if !self.is_enabled_for_codegen(ctx) { - return false; - } - - if self.is_blocklisted(ctx) || result.seen(self.id()) { - debug!( - "::process_before_codegen: Ignoring hidden or seen: \ - self = {:?}", - self - ); - return false; - } - - if !ctx.codegen_items().contains(&self.id()) { - // TODO(emilio, #453): Figure out what to do when this happens - // legitimately, we could track the opaque stuff and disable the - // assertion there I guess. - warn!("Found non-allowlisted item in code generation: {:?}", self); - } - - result.set_seen(self.id()); - true - } -} - -impl CodeGenerator for Item { - type Extra = (); - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - _extra: &(), - ) { - debug!("::codegen: self = {:?}", self); - if !self.process_before_codegen(ctx, result) { - return; - } - - match *self.kind() { - ItemKind::Module(ref module) => { - module.codegen(ctx, result, self); - } - ItemKind::Function(ref fun) => { - fun.codegen(ctx, result, self); - } - ItemKind::Var(ref var) => { - var.codegen(ctx, result, self); - } - ItemKind::Type(ref ty) => { - ty.codegen(ctx, result, self); - } - } - } -} - -impl CodeGenerator for Module { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug!("::codegen: item = {:?}", item); - - let codegen_self = |result: &mut CodegenResult, - found_any: &mut bool| { - for child in self.children() { - if ctx.codegen_items().contains(child) { - *found_any = true; - ctx.resolve_item(*child).codegen(ctx, result, &()); - } - } - - if item.id() == ctx.root_module() { - if result.saw_block { - 
utils::prepend_block_header(ctx, &mut *result); - } - if result.saw_bindgen_union { - utils::prepend_union_types(ctx, &mut *result); - } - if result.saw_incomplete_array { - utils::prepend_incomplete_array_types(ctx, &mut *result); - } - if ctx.need_bindgen_complex_type() { - utils::prepend_complex_type(&mut *result); - } - if result.saw_objc { - utils::prepend_objc_header(ctx, &mut *result); - } - if result.saw_bitfield_unit { - utils::prepend_bitfield_unit_type(ctx, &mut *result); - } - } - }; - - if !ctx.options().enable_cxx_namespaces || - (self.is_inline() && - !ctx.options().conservative_inline_namespaces) - { - codegen_self(result, &mut false); - return; - } - - let mut found_any = false; - let inner_items = result.inner(|result| { - result.push(root_import(ctx, item)); - - let path = item.namespace_aware_canonical_path(ctx).join("::"); - if let Some(raw_lines) = ctx.options().module_lines.get(&path) { - for raw_line in raw_lines { - found_any = true; - result.push( - proc_macro2::TokenStream::from_str(raw_line).unwrap(), - ); - } - } - - codegen_self(result, &mut found_any); - }); - - // Don't bother creating an empty module. - if !found_any { - return; - } - - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(name); - result.push(if item.id() == ctx.root_module() { - quote! { - #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] - pub mod #ident { - #( #inner_items )* - } - } - } else { - quote! 
{ - pub mod #ident { - #( #inner_items )* - } - } - }); - } -} - -impl CodeGenerator for Var { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - use crate::ir::var::VarType; - debug!("::codegen: item = {:?}", item); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let canonical_name = item.canonical_name(ctx); - - if result.seen_var(&canonical_name) { - return; - } - result.saw_var(&canonical_name); - - let canonical_ident = ctx.rust_ident(&canonical_name); - - // We can't generate bindings to static variables of templates. The - // number of actual variables for a single declaration are open ended - // and we don't know what instantiations do or don't exist. - if !item.all_template_params(ctx).is_empty() { - return; - } - - let mut attrs = vec![]; - if let Some(comment) = item.comment(ctx) { - attrs.push(attributes::doc(comment)); - } - - let ty = self.ty().to_rust_ty_or_opaque(ctx, &()); - - if let Some(val) = self.val() { - match *val { - VarType::Bool(val) => { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #val ; - }); - } - VarType::Int(val) => { - let int_kind = self - .ty() - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .expect_type() - .as_integer() - .unwrap(); - let val = if int_kind.is_signed() { - helpers::ast_ty::int_expr(val) - } else { - helpers::ast_ty::uint_expr(val as _) - }; - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #val ; - }); - } - VarType::String(ref bytes) => { - // Account the trailing zero. - // - // TODO: Here we ignore the type we just made up, probably - // we should refactor how the variable type and ty id work. - let len = bytes.len() + 1; - let ty = quote! 
{ - [u8; #len] - }; - - match String::from_utf8(bytes.clone()) { - Ok(string) => { - let cstr = helpers::ast_ty::cstr_expr(string); - if ctx - .options() - .rust_features - .static_lifetime_elision - { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : &#ty = #cstr ; - }); - } else { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : &'static #ty = #cstr ; - }); - } - } - Err(..) => { - let bytes = helpers::ast_ty::byte_array_expr(bytes); - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #bytes ; - }); - } - } - } - VarType::Float(f) => { - if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #expr ; - }); - } - } - VarType::Char(c) => { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #c ; - }); - } - } - } else { - // If necessary, apply a `#[link_name]` attribute - let link_name = self.mangled_name().unwrap_or_else(|| self.name()); - if !utils::names_will_be_identical_after_mangling( - &canonical_name, - link_name, - None, - ) { - attrs.push(attributes::link_name(link_name)); - } - - let maybe_mut = if self.is_const() { - quote! {} - } else { - quote! { mut } - }; - - let tokens = quote!( - extern "C" { - #(#attrs)* - pub static #maybe_mut #canonical_ident: #ty; - } - ); - - result.push(tokens); - } - } -} - -impl CodeGenerator for Type { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug!("::codegen: item = {:?}", item); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - match *self.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Function(..) | - TypeKind::ResolvedTypeRef(..) 
| - TypeKind::Opaque | - TypeKind::TypeParam => { - // These items don't need code generation, they only need to be - // converted to rust types in fields, arguments, and such. - // NOTE(emilio): If you add to this list, make sure to also add - // it to BindgenContext::compute_allowlisted_and_codegen_items. - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.codegen(ctx, result, item) - } - TypeKind::BlockPointer(inner) => { - if !ctx.options().generate_block { - return; - } - - let inner_item = - inner.into_resolver().through_type_refs().resolve(ctx); - let name = item.canonical_name(ctx); - - let inner_rust_type = { - if let TypeKind::Function(fnsig) = - inner_item.kind().expect_type().kind() - { - utils::fnsig_block(ctx, fnsig) - } else { - panic!("invalid block typedef: {:?}", inner_item) - } - }; - - let rust_name = ctx.rust_ident(&name); - - let mut tokens = if let Some(comment) = item.comment(ctx) { - attributes::doc(comment) - } else { - quote! {} - }; - - tokens.append_all(quote! { - pub type #rust_name = #inner_rust_type ; - }); - - result.push(tokens); - result.saw_block(); - } - TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), - TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { - let inner_item = - inner.into_resolver().through_type_refs().resolve(ctx); - let name = item.canonical_name(ctx); - let path = item.canonical_path(ctx); - - { - let through_type_aliases = inner - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(ctx); - - // Try to catch the common pattern: - // - // typedef struct foo { ... } foo; - // - // here, and also other more complex cases like #946. - if through_type_aliases.canonical_path(ctx) == path { - return; - } - } - - // If this is a known named type, disallow generating anything - // for it too. 
- let spelling = self.name().expect("Unnamed alias?"); - if utils::type_from_named(ctx, spelling).is_some() { - return; - } - - let mut outer_params = item.used_template_params(ctx); - - let is_opaque = item.is_opaque(ctx, &()); - let inner_rust_type = if is_opaque { - outer_params = vec![]; - self.to_opaque(ctx, item) - } else { - // Its possible that we have better layout information than - // the inner type does, so fall back to an opaque blob based - // on our layout if converting the inner item fails. - let mut inner_ty = inner_item - .try_to_rust_ty_or_opaque(ctx, &()) - .unwrap_or_else(|_| self.to_opaque(ctx, item)); - inner_ty.append_implicit_template_params(ctx, inner_item); - inner_ty - }; - - { - // FIXME(emilio): This is a workaround to avoid generating - // incorrect type aliases because of types that we haven't - // been able to resolve (because, eg, they depend on a - // template parameter). - // - // It's kind of a shame not generating them even when they - // could be referenced, but we already do the same for items - // with invalid template parameters, and at least this way - // they can be replaced, instead of generating plain invalid - // code. - let inner_canon_type = - inner_item.expect_type().canonical_type(ctx); - if inner_canon_type.is_invalid_type_param() { - warn!( - "Item contained invalid named type, skipping: \ - {:?}, {:?}", - item, inner_item - ); - return; - } - } - - let rust_name = ctx.rust_ident(&name); - - let mut tokens = if let Some(comment) = item.comment(ctx) { - attributes::doc(comment) - } else { - quote! 
{} - }; - - let alias_style = if ctx.options().type_alias.matches(&name) { - AliasVariation::TypeAlias - } else if ctx.options().new_type_alias.matches(&name) { - AliasVariation::NewType - } else if ctx.options().new_type_alias_deref.matches(&name) { - AliasVariation::NewTypeDeref - } else { - ctx.options().default_alias_style - }; - - // We prefer using `pub use` over `pub type` because of: - // https://github.com/rust-lang/rust/issues/26264 - // These are the only characters allowed in simple - // paths, eg `good::dogs::Bront`. - if inner_rust_type.to_string().chars().all(|c| matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ')) && outer_params.is_empty() && - !is_opaque && - alias_style == AliasVariation::TypeAlias && - inner_item.expect_type().canonical_type(ctx).is_enum() - { - tokens.append_all(quote! { - pub use - }); - let path = top_level_path(ctx, item); - tokens.append_separated(path, quote!(::)); - tokens.append_all(quote! { - :: #inner_rust_type as #rust_name ; - }); - result.push(tokens); - return; - } - - tokens.append_all(match alias_style { - AliasVariation::TypeAlias => quote! { - pub type #rust_name - }, - AliasVariation::NewType | AliasVariation::NewTypeDeref => { - assert!( - ctx.options().rust_features().repr_transparent, - "repr_transparent feature is required to use {:?}", - alias_style - ); - - let mut attributes = - vec![attributes::repr("transparent")]; - let derivable_traits = derives_of_item(item, ctx); - if !derivable_traits.is_empty() { - let derives: Vec<_> = derivable_traits.into(); - attributes.push(attributes::derives(&derives)) - } - - quote! 
{ - #( #attributes )* - pub struct #rust_name - } - } - }); - - let params: Vec<_> = outer_params - .into_iter() - .filter_map(|p| p.as_template_param(ctx, &())) - .collect(); - if params - .iter() - .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) - { - warn!( - "Item contained invalid template \ - parameter: {:?}", - item - ); - return; - } - let params: Vec<_> = params - .iter() - .map(|p| { - p.try_to_rust_ty(ctx, &()).expect( - "type parameters can always convert to rust ty OK", - ) - }) - .collect(); - - if !params.is_empty() { - tokens.append_all(quote! { - < #( #params ),* > - }); - } - - tokens.append_all(match alias_style { - AliasVariation::TypeAlias => quote! { - = #inner_rust_type ; - }, - AliasVariation::NewType | AliasVariation::NewTypeDeref => { - quote! { - (pub #inner_rust_type) ; - } - } - }); - - if alias_style == AliasVariation::NewTypeDeref { - let prefix = ctx.trait_prefix(); - tokens.append_all(quote! { - impl ::#prefix::ops::Deref for #rust_name { - type Target = #inner_rust_type; - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } - } - impl ::#prefix::ops::DerefMut for #rust_name { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } - } - }); - } - - result.push(tokens); - } - TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), - TypeKind::ObjCId | TypeKind::ObjCSel => { - result.saw_objc(); - } - TypeKind::ObjCInterface(ref interface) => { - interface.codegen(ctx, result, item) - } - ref u @ TypeKind::UnresolvedTypeRef(..) 
=> { - unreachable!("Should have been resolved after parsing {:?}!", u) - } - } - } -} - -struct Vtable<'a> { - item_id: ItemId, - #[allow(dead_code)] - methods: &'a [Method], - #[allow(dead_code)] - base_classes: &'a [Base], -} - -impl<'a> Vtable<'a> { - fn new( - item_id: ItemId, - methods: &'a [Method], - base_classes: &'a [Base], - ) -> Self { - Vtable { - item_id, - methods, - base_classes, - } - } -} - -impl<'a> CodeGenerator for Vtable<'a> { - type Extra = Item; - type Return = (); - - fn codegen<'b>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'b>, - item: &Item, - ) { - assert_eq!(item.id(), self.item_id); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - // For now, generate an empty struct, later we should generate function - // pointers and whatnot. - let name = ctx.rust_ident(&self.canonical_name(ctx)); - let void = helpers::ast_ty::c_void(ctx); - result.push(quote! { - #[repr(C)] - pub struct #name ( #void ); - }); - } -} - -impl<'a> ItemCanonicalName for Vtable<'a> { - fn canonical_name(&self, ctx: &BindgenContext) -> String { - format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx)) - } -} - -impl<'a> TryToRustTy for Vtable<'a> { - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - let name = ctx.rust_ident(self.canonical_name(ctx)); - Ok(quote! { - #name - }) - } -} - -impl CodeGenerator for TemplateInstantiation { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug_assert!(item.is_enabled_for_codegen(ctx)); - - // Although uses of instantiations don't need code generation, and are - // just converted to rust types in fields, vars, etc, we take this - // opportunity to generate tests for their layout here. 
If the - // instantiation is opaque, then its presumably because we don't - // properly understand it (maybe because of specializations), and so we - // shouldn't emit layout tests either. - if !ctx.options().layout_tests || self.is_opaque(ctx, item) { - return; - } - - // If there are any unbound type parameters, then we can't generate a - // layout test because we aren't dealing with a concrete type with a - // concrete size and alignment. - if ctx.uses_any_template_parameters(item.id()) { - return; - } - - let layout = item.kind().expect_type().layout(ctx); - - if let Some(layout) = layout { - let size = layout.size; - let align = layout.align; - - let name = item.full_disambiguated_name(ctx); - let mut fn_name = - format!("__bindgen_test_layout_{}_instantiation", name); - let times_seen = result.overload_number(&fn_name); - if times_seen > 0 { - write!(&mut fn_name, "_{}", times_seen).unwrap(); - } - - let fn_name = ctx.rust_ident_raw(fn_name); - - let prefix = ctx.trait_prefix(); - let ident = item.to_rust_ty_or_opaque(ctx, &()); - let size_of_expr = quote! { - ::#prefix::mem::size_of::<#ident>() - }; - let align_of_expr = quote! { - ::#prefix::mem::align_of::<#ident>() - }; - - let item = quote! { - #[test] - fn #fn_name() { - assert_eq!(#size_of_expr, #size, - concat!("Size of template specialization: ", - stringify!(#ident))); - assert_eq!(#align_of_expr, #align, - concat!("Alignment of template specialization: ", - stringify!(#ident))); - } - }; - - result.push(item); - } - } -} - -/// Trait for implementing the code generation of a struct or union field. 
-trait FieldCodegen<'a> { - type Extra; - - fn codegen( - &self, - ctx: &BindgenContext, - fields_should_be_private: bool, - codegen_depth: usize, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - extra: Self::Extra, - ) where - F: Extend, - M: Extend; -} - -impl<'a> FieldCodegen<'a> for Field { - type Extra = (); - - fn codegen( - &self, - ctx: &BindgenContext, - fields_should_be_private: bool, - codegen_depth: usize, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - _: (), - ) where - F: Extend, - M: Extend, - { - match *self { - Field::DataMember(ref data) => { - data.codegen( - ctx, - fields_should_be_private, - codegen_depth, - accessor_kind, - parent, - result, - struct_layout, - fields, - methods, - (), - ); - } - Field::Bitfields(ref unit) => { - unit.codegen( - ctx, - fields_should_be_private, - codegen_depth, - accessor_kind, - parent, - result, - struct_layout, - fields, - methods, - (), - ); - } - } - } -} - -impl<'a> FieldCodegen<'a> for FieldData { - type Extra = (); - - fn codegen( - &self, - ctx: &BindgenContext, - fields_should_be_private: bool, - codegen_depth: usize, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - _: (), - ) where - F: Extend, - M: Extend, - { - // Bitfields are handled by `FieldCodegen` implementations for - // `BitfieldUnit` and `Bitfield`. - assert!(self.bitfield_width().is_none()); - - let field_item = - self.ty().into_resolver().through_type_refs().resolve(ctx); - let field_ty = field_item.expect_type(); - let mut ty = self.ty().to_rust_ty_or_opaque(ctx, &()); - ty.append_implicit_template_params(ctx, field_item); - - // NB: If supported, we use proper `union` types. 
- let ty = if parent.is_union() && !struct_layout.is_rust_union() { - result.saw_bindgen_union(); - if ctx.options().enable_cxx_namespaces { - quote! { - root::__BindgenUnionField<#ty> - } - } else { - quote! { - __BindgenUnionField<#ty> - } - } - } else if let Some(item) = field_ty.is_incomplete_array(ctx) { - result.saw_incomplete_array(); - - let inner = item.to_rust_ty_or_opaque(ctx, &()); - - if ctx.options().enable_cxx_namespaces { - quote! { - root::__IncompleteArrayField<#inner> - } - } else { - quote! { - __IncompleteArrayField<#inner> - } - } - } else { - ty - }; - - let mut field = quote! {}; - if ctx.options().generate_comments { - if let Some(raw_comment) = self.comment() { - let comment = - comment::preprocess(raw_comment, codegen_depth + 1); - field = attributes::doc(comment); - } - } - - let field_name = self - .name() - .map(|name| ctx.rust_mangle(name).into_owned()) - .expect("Each field should have a name in codegen!"); - let field_ident = ctx.rust_ident_raw(field_name.as_str()); - - if let Some(padding_field) = - struct_layout.saw_field(&field_name, field_ty, self.offset()) - { - fields.extend(Some(padding_field)); - } - - let is_private = (!self.is_public() && - ctx.options().respect_cxx_access_specs) || - self.annotations() - .private_fields() - .unwrap_or(fields_should_be_private); - - let accessor_kind = - self.annotations().accessor_kind().unwrap_or(accessor_kind); - - if is_private { - field.append_all(quote! { - #field_ident : #ty , - }); - } else { - field.append_all(quote! { - pub #field_ident : #ty , - }); - } - - fields.extend(Some(field)); - - // TODO: Factor the following code out, please! 
- if accessor_kind == FieldAccessorKind::None { - return; - } - - let getter_name = ctx.rust_ident_raw(format!("get_{}", field_name)); - let mutable_getter_name = - ctx.rust_ident_raw(format!("get_{}_mut", field_name)); - let field_name = ctx.rust_ident_raw(field_name); - - methods.extend(Some(match accessor_kind { - FieldAccessorKind::None => unreachable!(), - FieldAccessorKind::Regular => { - quote! { - #[inline] - pub fn #getter_name(&self) -> & #ty { - &self.#field_name - } - - #[inline] - pub fn #mutable_getter_name(&mut self) -> &mut #ty { - &mut self.#field_name - } - } - } - FieldAccessorKind::Unsafe => { - quote! { - #[inline] - pub unsafe fn #getter_name(&self) -> & #ty { - &self.#field_name - } - - #[inline] - pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty { - &mut self.#field_name - } - } - } - FieldAccessorKind::Immutable => { - quote! { - #[inline] - pub fn #getter_name(&self) -> & #ty { - &self.#field_name - } - } - } - })); - } -} - -impl BitfieldUnit { - /// Get the constructor name for this bitfield unit. - fn ctor_name(&self) -> proc_macro2::TokenStream { - let ctor_name = Ident::new( - &format!("new_bitfield_{}", self.nth()), - Span::call_site(), - ); - quote! { - #ctor_name - } - } -} - -impl Bitfield { - /// Extend an under construction bitfield unit constructor with this - /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` - /// variable that's being constructed. - fn extend_ctor_impl( - &self, - ctx: &BindgenContext, - param_name: proc_macro2::TokenStream, - mut ctor_impl: proc_macro2::TokenStream, - ) -> proc_macro2::TokenStream { - let bitfield_ty = ctx.resolve_type(self.ty()); - let bitfield_ty_layout = bitfield_ty - .layout(ctx) - .expect("Bitfield without layout? 
Gah!"); - let bitfield_int_ty = helpers::integer_type(ctx, bitfield_ty_layout) - .expect( - "Should already have verified that the bitfield is \ - representable as an int", - ); - - let offset = self.offset_into_unit(); - let width = self.width() as u8; - let prefix = ctx.trait_prefix(); - - ctor_impl.append_all(quote! { - __bindgen_bitfield_unit.set( - #offset, - #width, - { - let #param_name: #bitfield_int_ty = unsafe { - ::#prefix::mem::transmute(#param_name) - }; - #param_name as u64 - } - ); - }); - - ctor_impl - } -} - -fn access_specifier( - ctx: &BindgenContext, - is_pub: bool, -) -> proc_macro2::TokenStream { - if is_pub || !ctx.options().respect_cxx_access_specs { - quote! { pub } - } else { - quote! {} - } -} - -impl<'a> FieldCodegen<'a> for BitfieldUnit { - type Extra = (); - - fn codegen( - &self, - ctx: &BindgenContext, - fields_should_be_private: bool, - codegen_depth: usize, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - _: (), - ) where - F: Extend, - M: Extend, - { - use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; - - result.saw_bitfield_unit(); - - let layout = self.layout(); - let unit_field_ty = helpers::bitfield_unit(ctx, layout); - let field_ty = { - if parent.is_union() && !struct_layout.is_rust_union() { - result.saw_bindgen_union(); - if ctx.options().enable_cxx_namespaces { - quote! { - root::__BindgenUnionField<#unit_field_ty> - } - } else { - quote! { - __BindgenUnionField<#unit_field_ty> - } - } - } else { - unit_field_ty.clone() - } - }; - - { - let align_field_name = format!("_bitfield_align_{}", self.nth()); - let align_field_ident = ctx.rust_ident(&align_field_name); - let align_ty = match self.layout().align { - n if n >= 8 => quote! { u64 }, - 4 => quote! { u32 }, - 2 => quote! { u16 }, - _ => quote! { u8 }, - }; - let align_field = quote! 
{ - pub #align_field_ident: [#align_ty; 0], - }; - fields.extend(Some(align_field)); - } - - let unit_field_name = format!("_bitfield_{}", self.nth()); - let unit_field_ident = ctx.rust_ident(&unit_field_name); - - let ctor_name = self.ctor_name(); - let mut ctor_params = vec![]; - let mut ctor_impl = quote! {}; - - // We cannot generate any constructor if the underlying storage can't - // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default. - // - // We don't check `larger_arrays` here because Default does still have - // the 32 items limitation. - let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; - - let mut access_spec = !fields_should_be_private; - for bf in self.bitfields() { - // Codegen not allowed for anonymous bitfields - if bf.name().is_none() { - continue; - } - - if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT && - !ctx.options().rust_features().larger_arrays - { - continue; - } - - access_spec &= bf.is_public(); - let mut bitfield_representable_as_int = true; - - bf.codegen( - ctx, - fields_should_be_private, - codegen_depth, - accessor_kind, - parent, - result, - struct_layout, - fields, - methods, - (&unit_field_name, &mut bitfield_representable_as_int), - ); - - // Generating a constructor requires the bitfield to be representable as an integer. - if !bitfield_representable_as_int { - generate_ctor = false; - continue; - } - - let param_name = bitfield_getter_name(ctx, bf); - let bitfield_ty_item = ctx.resolve_item(bf.ty()); - let bitfield_ty = bitfield_ty_item.expect_type(); - let bitfield_ty = - bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); - - ctor_params.push(quote! { - #param_name : #bitfield_ty - }); - ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl); - } - - let access_spec = access_specifier(ctx, access_spec); - - let field = quote! { - #access_spec #unit_field_ident : #field_ty , - }; - fields.extend(Some(field)); - - if generate_ctor { - methods.extend(Some(quote! 
{ - #[inline] - #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty { - let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default(); - #ctor_impl - __bindgen_bitfield_unit - } - })); - } - - struct_layout.saw_bitfield_unit(layout); - } -} - -fn bitfield_getter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let name = bitfield.getter_name(); - let name = ctx.rust_ident_raw(name); - quote! { #name } -} - -fn bitfield_setter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let setter = bitfield.setter_name(); - let setter = ctx.rust_ident_raw(setter); - quote! { #setter } -} - -impl<'a> FieldCodegen<'a> for Bitfield { - type Extra = (&'a str, &'a mut bool); - - fn codegen( - &self, - ctx: &BindgenContext, - fields_should_be_private: bool, - _codegen_depth: usize, - _accessor_kind: FieldAccessorKind, - parent: &CompInfo, - _result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - _fields: &mut F, - methods: &mut M, - (unit_field_name, bitfield_representable_as_int): (&'a str, &mut bool), - ) where - F: Extend, - M: Extend, - { - let prefix = ctx.trait_prefix(); - let getter_name = bitfield_getter_name(ctx, self); - let setter_name = bitfield_setter_name(ctx, self); - let unit_field_ident = Ident::new(unit_field_name, Span::call_site()); - - let bitfield_ty_item = ctx.resolve_item(self.ty()); - let bitfield_ty = bitfield_ty_item.expect_type(); - - let bitfield_ty_layout = bitfield_ty - .layout(ctx) - .expect("Bitfield without layout? 
Gah!"); - let bitfield_int_ty = - match helpers::integer_type(ctx, bitfield_ty_layout) { - Some(int_ty) => { - *bitfield_representable_as_int = true; - int_ty - } - None => { - *bitfield_representable_as_int = false; - return; - } - }; - - let bitfield_ty = - bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); - - let offset = self.offset_into_unit(); - let width = self.width() as u8; - let access_spec = access_specifier( - ctx, - self.is_public() && !fields_should_be_private, - ); - - if parent.is_union() && !struct_layout.is_rust_union() { - methods.extend(Some(quote! { - #[inline] - #access_spec fn #getter_name(&self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute( - self.#unit_field_ident.as_ref().get(#offset, #width) - as #bitfield_int_ty - ) - } - } - - #[inline] - #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - self.#unit_field_ident.as_mut().set( - #offset, - #width, - val as u64 - ) - } - } - })); - } else { - methods.extend(Some(quote! { - #[inline] - #access_spec fn #getter_name(&self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute( - self.#unit_field_ident.get(#offset, #width) - as #bitfield_int_ty - ) - } - } - - #[inline] - #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - self.#unit_field_ident.set( - #offset, - #width, - val as u64 - ) - } - } - })); - } - } -} - -impl CodeGenerator for CompInfo { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug!("::codegen: item = {:?}", item); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - // Don't output classes with template parameters that aren't types, and - // also don't output template specializations, neither total or partial. 
- if self.has_non_type_template_params() { - return; - } - - let ty = item.expect_type(); - let layout = ty.layout(ctx); - let mut packed = self.is_packed(ctx, layout.as_ref()); - - let canonical_name = item.canonical_name(ctx); - let canonical_ident = ctx.rust_ident(&canonical_name); - - // Generate the vtable from the method list if appropriate. - // - // TODO: I don't know how this could play with virtual methods that are - // not in the list of methods found by us, we'll see. Also, could the - // order of the vtable pointers vary? - // - // FIXME: Once we generate proper vtables, we need to codegen the - // vtable, but *not* generate a field for it in the case that - // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true. - // - // Also, we need to generate the vtable in such a way it "inherits" from - // the parent too. - let is_opaque = item.is_opaque(ctx, &()); - let mut fields = vec![]; - let mut struct_layout = - StructLayoutTracker::new(ctx, self, ty, &canonical_name); - - if !is_opaque { - if item.has_vtable_ptr(ctx) { - let vtable = - Vtable::new(item.id(), self.methods(), self.base_members()); - vtable.codegen(ctx, result, item); - - let vtable_type = vtable - .try_to_rust_ty(ctx, &()) - .expect("vtable to Rust type conversion is infallible") - .to_ptr(true); - - fields.push(quote! { - pub vtable_: #vtable_type , - }); - - struct_layout.saw_vtable(); - } - - for base in self.base_members() { - if !base.requires_storage(ctx) { - continue; - } - - let inner_item = ctx.resolve_item(base.ty); - let mut inner = inner_item.to_rust_ty_or_opaque(ctx, &()); - inner.append_implicit_template_params(ctx, inner_item); - let field_name = ctx.rust_ident(&base.field_name); - - struct_layout.saw_base(inner_item.expect_type()); - - let access_spec = access_specifier(ctx, base.is_public()); - fields.push(quote! 
{ - #access_spec #field_name: #inner, - }); - } - } - - let mut methods = vec![]; - if !is_opaque { - let codegen_depth = item.codegen_depth(ctx); - let fields_should_be_private = - item.annotations().private_fields().unwrap_or(false); - let struct_accessor_kind = item - .annotations() - .accessor_kind() - .unwrap_or(FieldAccessorKind::None); - for field in self.fields() { - field.codegen( - ctx, - fields_should_be_private, - codegen_depth, - struct_accessor_kind, - self, - result, - &mut struct_layout, - &mut fields, - &mut methods, - (), - ); - } - // Check whether an explicit padding field is needed - // at the end. - if let Some(comp_layout) = layout { - fields.extend( - struct_layout - .add_tail_padding(&canonical_name, comp_layout), - ); - } - } - - if is_opaque { - // Opaque item should not have generated methods, fields. - debug_assert!(fields.is_empty()); - debug_assert!(methods.is_empty()); - } - - let is_union = self.kind() == CompKind::Union; - let layout = item.kind().expect_type().layout(ctx); - let zero_sized = item.is_zero_sized(ctx); - let forward_decl = self.is_forward_declaration(); - - let mut explicit_align = None; - - // C++ requires every struct to be addressable, so what C++ compilers do - // is making the struct 1-byte sized. - // - // This is apparently not the case for C, see: - // https://github.com/rust-lang/rust-bindgen/issues/551 - // - // Just get the layout, and assume C++ if not. - // - // NOTE: This check is conveniently here to avoid the dummy fields we - // may add for unused template parameters. - if !forward_decl && zero_sized { - let has_address = if is_opaque { - // Generate the address field if it's an opaque type and - // couldn't determine the layout of the blob. 
- layout.is_none() - } else { - layout.map_or(true, |l| l.size != 0) - }; - - if has_address { - let layout = Layout::new(1, 1); - let ty = helpers::blob(ctx, Layout::new(1, 1)); - struct_layout.saw_field_with_layout( - "_address", - layout, - /* offset = */ Some(0), - ); - fields.push(quote! { - pub _address: #ty, - }); - } - } - - if is_opaque { - match layout { - Some(l) => { - explicit_align = Some(l.align); - - let ty = helpers::blob(ctx, l); - fields.push(quote! { - pub _bindgen_opaque_blob: #ty , - }); - } - None => { - warn!("Opaque type without layout! Expect dragons!"); - } - } - } else if !is_union && !zero_sized { - if let Some(padding_field) = - layout.and_then(|layout| struct_layout.pad_struct(layout)) - { - fields.push(padding_field); - } - - if let Some(layout) = layout { - if struct_layout.requires_explicit_align(layout) { - if layout.align == 1 { - packed = true; - } else { - explicit_align = Some(layout.align); - if !ctx.options().rust_features.repr_align { - let ty = helpers::blob( - ctx, - Layout::new(0, layout.align), - ); - fields.push(quote! { - pub __bindgen_align: #ty , - }); - } - } - } - } - } else if is_union && !forward_decl { - // TODO(emilio): It'd be nice to unify this with the struct path - // above somehow. - let layout = layout.expect("Unable to get layout information?"); - if struct_layout.requires_explicit_align(layout) { - explicit_align = Some(layout.align); - } - - if !struct_layout.is_rust_union() { - let ty = helpers::blob(ctx, layout); - fields.push(quote! { - pub bindgen_union_field: #ty , - }) - } - } - - if forward_decl { - fields.push(quote! 
{ - _unused: [u8; 0], - }); - } - - let mut generic_param_names = vec![]; - - for (idx, ty) in item.used_template_params(ctx).iter().enumerate() { - let param = ctx.resolve_type(*ty); - let name = param.name().unwrap(); - let ident = ctx.rust_ident(name); - generic_param_names.push(ident.clone()); - - let prefix = ctx.trait_prefix(); - let field_name = ctx.rust_ident(format!("_phantom_{}", idx)); - fields.push(quote! { - pub #field_name : ::#prefix::marker::PhantomData< - ::#prefix::cell::UnsafeCell<#ident> - > , - }); - } - - let generics = if !generic_param_names.is_empty() { - let generic_param_names = generic_param_names.clone(); - quote! { - < #( #generic_param_names ),* > - } - } else { - quote! {} - }; - - let mut attributes = vec![]; - let mut needs_clone_impl = false; - let mut needs_default_impl = false; - let mut needs_debug_impl = false; - let mut needs_partialeq_impl = false; - if let Some(comment) = item.comment(ctx) { - attributes.push(attributes::doc(comment)); - } - if packed && !is_opaque { - let n = layout.map_or(1, |l| l.align); - assert!(ctx.options().rust_features().repr_packed_n || n == 1); - let packed_repr = if n == 1 { - "packed".to_string() - } else { - format!("packed({})", n) - }; - attributes.push(attributes::repr_list(&["C", &packed_repr])); - } else { - attributes.push(attributes::repr("C")); - } - - if ctx.options().rust_features().repr_align { - if let Some(explicit) = explicit_align { - // Ensure that the struct has the correct alignment even in - // presence of alignas. - let explicit = helpers::ast_ty::int_expr(explicit as i64); - attributes.push(quote! 
{ - #[repr(align(#explicit))] - }); - } - } - - let derivable_traits = derives_of_item(item, ctx); - if !derivable_traits.contains(DerivableTraits::DEBUG) { - needs_debug_impl = ctx.options().derive_debug && - ctx.options().impl_debug && - !ctx.no_debug_by_name(item) && - !item.annotations().disallow_debug(); - } - - if !derivable_traits.contains(DerivableTraits::DEFAULT) { - needs_default_impl = ctx.options().derive_default && - !self.is_forward_declaration() && - !ctx.no_default_by_name(item) && - !item.annotations().disallow_default(); - } - - let all_template_params = item.all_template_params(ctx); - - if derivable_traits.contains(DerivableTraits::COPY) && - !derivable_traits.contains(DerivableTraits::CLONE) - { - needs_clone_impl = true; - } - - if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) { - needs_partialeq_impl = ctx.options().derive_partialeq && - ctx.options().impl_partialeq && - ctx.lookup_can_derive_partialeq_or_partialord(item.id()) == - CanDerive::Manually; - } - - let mut derives: Vec<_> = derivable_traits.into(); - derives.extend(item.annotations().derives().iter().map(String::as_str)); - - // The custom derives callback may return a list of derive attributes; - // add them to the end of the list. - let custom_derives; - if let Some(cb) = &ctx.options().parse_callbacks { - custom_derives = cb.add_derives(&canonical_name); - // In most cases this will be a no-op, since custom_derives will be empty. - derives.extend(custom_derives.iter().map(|s| s.as_str())); - }; - - if !derives.is_empty() { - attributes.push(attributes::derives(&derives)) - } - - if item.annotations().must_use_type() || ctx.must_use_type_by_name(item) - { - attributes.push(attributes::must_use()); - } - - let mut tokens = if is_union && struct_layout.is_rust_union() { - quote! { - #( #attributes )* - pub union #canonical_ident - } - } else { - quote! { - #( #attributes )* - pub struct #canonical_ident - } - }; - - tokens.append_all(quote! 
{ - #generics { - #( #fields )* - } - }); - result.push(tokens); - - // Generate the inner types and all that stuff. - // - // TODO: In the future we might want to be smart, and use nested - // modules, and whatnot. - for ty in self.inner_types() { - let child_item = ctx.resolve_item(*ty); - // assert_eq!(child_item.parent_id(), item.id()); - child_item.codegen(ctx, result, &()); - } - - // NOTE: Some unexposed attributes (like alignment attributes) may - // affect layout, so we're bad and pray to the gods for avoid sending - // all the tests to shit when parsing things like max_align_t. - if self.found_unknown_attr() { - warn!( - "Type {} has an unknown attribute that may affect layout", - canonical_ident - ); - } - - if all_template_params.is_empty() { - if !is_opaque { - for var in self.inner_vars() { - ctx.resolve_item(*var).codegen(ctx, result, &()); - } - } - - if ctx.options().layout_tests && !self.is_forward_declaration() { - if let Some(layout) = layout { - let fn_name = - format!("bindgen_test_layout_{}", canonical_ident); - let fn_name = ctx.rust_ident_raw(fn_name); - let prefix = ctx.trait_prefix(); - let size_of_expr = quote! { - ::#prefix::mem::size_of::<#canonical_ident>() - }; - let align_of_expr = quote! { - ::#prefix::mem::align_of::<#canonical_ident>() - }; - let size = layout.size; - let align = layout.align; - - let check_struct_align = if align > - ctx.target_pointer_size() && - !ctx.options().rust_features().repr_align - { - None - } else { - Some(quote! 
{ - assert_eq!(#align_of_expr, - #align, - concat!("Alignment of ", stringify!(#canonical_ident))); - - }) - }; - - // FIXME when [issue #465](https://github.com/rust-lang/rust-bindgen/issues/465) ready - let too_many_base_vtables = self - .base_members() - .iter() - .filter(|base| base.ty.has_vtable(ctx)) - .count() > - 1; - - let should_skip_field_offset_checks = - is_opaque || too_many_base_vtables; - - let check_field_offset = if should_skip_field_offset_checks - { - vec![] - } else { - let asserts = self.fields() - .iter() - .filter_map(|field| match *field { - Field::DataMember(ref f) if f.name().is_some() => Some(f), - _ => None, - }) - .flat_map(|field| { - let name = field.name().unwrap(); - field.offset().map(|offset| { - let field_offset = offset / 8; - let field_name = ctx.rust_ident(name); - - quote! { - assert_eq!( - unsafe { - &(*(::#prefix::ptr::null::<#canonical_ident>())).#field_name as *const _ as usize - }, - #field_offset, - concat!("Offset of field: ", stringify!(#canonical_ident), "::", stringify!(#field_name)) - ); - } - }) - }) - .collect::>(); - - asserts - }; - - let item = quote! 
{ - #[test] - fn #fn_name() { - assert_eq!(#size_of_expr, - #size, - concat!("Size of: ", stringify!(#canonical_ident))); - - #check_struct_align - #( #check_field_offset )* - } - }; - result.push(item); - } - } - - let mut method_names = Default::default(); - if ctx.options().codegen_config.methods() { - for method in self.methods() { - assert!(method.kind() != MethodKind::Constructor); - method.codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - ); - } - } - - if ctx.options().codegen_config.constructors() { - for sig in self.constructors() { - Method::new( - MethodKind::Constructor, - *sig, - /* const */ - false, - ) - .codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - ); - } - } - - if ctx.options().codegen_config.destructors() { - if let Some((kind, destructor)) = self.destructor() { - debug_assert!(kind.is_destructor()); - Method::new(kind, destructor, false).codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - ); - } - } - } - - // NB: We can't use to_rust_ty here since for opaque types this tries to - // use the specialization knowledge to generate a blob field. - let ty_for_impl = quote! { - #canonical_ident #generics - }; - - if needs_clone_impl { - result.push(quote! { - impl #generics Clone for #ty_for_impl { - fn clone(&self) -> Self { *self } - } - }); - } - - if needs_default_impl { - let prefix = ctx.trait_prefix(); - let body = if ctx.options().rust_features().maybe_uninit { - quote! { - let mut s = ::#prefix::mem::MaybeUninit::::uninit(); - unsafe { - ::#prefix::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } - } else { - quote! { - unsafe { - let mut s: Self = ::#prefix::mem::uninitialized(); - ::#prefix::ptr::write_bytes(&mut s, 0, 1); - s - } - } - }; - // Note we use `ptr::write_bytes()` instead of `mem::zeroed()` because the latter does - // not necessarily ensure padding bytes are zeroed. 
Some C libraries are sensitive to - // non-zero padding bytes, especially when forwards/backwards compatability is - // involved. - result.push(quote! { - impl #generics Default for #ty_for_impl { - fn default() -> Self { - #body - } - } - }); - } - - if needs_debug_impl { - let impl_ = impl_debug::gen_debug_impl( - ctx, - self.fields(), - item, - self.kind(), - ); - - let prefix = ctx.trait_prefix(); - - result.push(quote! { - impl #generics ::#prefix::fmt::Debug for #ty_for_impl { - #impl_ - } - }); - } - - if needs_partialeq_impl { - if let Some(impl_) = impl_partialeq::gen_partialeq_impl( - ctx, - self, - item, - &ty_for_impl, - ) { - let partialeq_bounds = if !generic_param_names.is_empty() { - let bounds = generic_param_names.iter().map(|t| { - quote! { #t: PartialEq } - }); - quote! { where #( #bounds ),* } - } else { - quote! {} - }; - - let prefix = ctx.trait_prefix(); - result.push(quote! { - impl #generics ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds { - #impl_ - } - }); - } - } - - if !methods.is_empty() { - result.push(quote! { - impl #generics #ty_for_impl { - #( #methods )* - } - }); - } - } -} - -trait MethodCodegen { - fn codegen_method<'a>( - &self, - ctx: &BindgenContext, - methods: &mut Vec, - method_names: &mut HashMap, - result: &mut CodegenResult<'a>, - parent: &CompInfo, - ); -} - -impl MethodCodegen for Method { - fn codegen_method<'a>( - &self, - ctx: &BindgenContext, - methods: &mut Vec, - method_names: &mut HashMap, - result: &mut CodegenResult<'a>, - _parent: &CompInfo, - ) { - assert!({ - let cc = &ctx.options().codegen_config; - match self.kind() { - MethodKind::Constructor => cc.constructors(), - MethodKind::Destructor => cc.destructors(), - MethodKind::VirtualDestructor { .. } => cc.destructors(), - MethodKind::Static | - MethodKind::Normal | - MethodKind::Virtual { .. } => cc.methods(), - } - }); - - // TODO(emilio): We could generate final stuff at least. 
- if self.is_virtual() { - return; // FIXME - } - - // First of all, output the actual function. - let function_item = ctx.resolve_item(self.signature()); - if !function_item.process_before_codegen(ctx, result) { - return; - } - let function = function_item.expect_function(); - let times_seen = function.codegen(ctx, result, function_item); - let times_seen = match times_seen { - Some(seen) => seen, - None => return, - }; - let signature_item = ctx.resolve_item(function.signature()); - let mut name = match self.kind() { - MethodKind::Constructor => "new".into(), - MethodKind::Destructor => "destruct".into(), - _ => function.name().to_owned(), - }; - - let signature = match *signature_item.expect_type().kind() { - TypeKind::Function(ref sig) => sig, - _ => panic!("How in the world?"), - }; - - if let (Abi::ThisCall, false) = - (signature.abi(), ctx.options().rust_features().thiscall_abi) - { - return; - } - - // Do not generate variadic methods, since rust does not allow - // implementing them, and we don't do a good job at it anyway. - if signature.is_variadic() { - return; - } - - let count = { - let count = method_names.entry(name.clone()).or_insert(0); - *count += 1; - *count - 1 - }; - - if count != 0 { - name.push_str(&count.to_string()); - } - - let mut function_name = function_item.canonical_name(ctx); - if times_seen > 0 { - write!(&mut function_name, "{}", times_seen).unwrap(); - } - let function_name = ctx.rust_ident(function_name); - let mut args = utils::fnsig_arguments(ctx, signature); - let mut ret = utils::fnsig_return_ty(ctx, signature); - - if !self.is_static() && !self.is_constructor() { - args[0] = if self.is_const() { - quote! { &self } - } else { - quote! { &mut self } - }; - } - - // If it's a constructor, we always return `Self`, and we inject the - // "this" parameter, so there's no need to ask the user for it. - // - // Note that constructors in Clang are represented as functions with - // return-type = void. 
- if self.is_constructor() { - args.remove(0); - ret = quote! { -> Self }; - } - - let mut exprs = - helpers::ast_ty::arguments_from_signature(signature, ctx); - - let mut stmts = vec![]; - - // If it's a constructor, we need to insert an extra parameter with a - // variable called `__bindgen_tmp` we're going to create. - if self.is_constructor() { - let prefix = ctx.trait_prefix(); - let tmp_variable_decl = if ctx - .options() - .rust_features() - .maybe_uninit - { - exprs[0] = quote! { - __bindgen_tmp.as_mut_ptr() - }; - quote! { - let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() - } - } else { - exprs[0] = quote! { - &mut __bindgen_tmp - }; - quote! { - let mut __bindgen_tmp = ::#prefix::mem::uninitialized() - } - }; - stmts.push(tmp_variable_decl); - } else if !self.is_static() { - assert!(!exprs.is_empty()); - exprs[0] = quote! { - self - }; - }; - - let call = quote! { - #function_name (#( #exprs ),* ) - }; - - stmts.push(call); - - if self.is_constructor() { - stmts.push(if ctx.options().rust_features().maybe_uninit { - quote! { - __bindgen_tmp.assume_init() - } - } else { - quote! { - __bindgen_tmp - } - }) - } - - let block = quote! { - #( #stmts );* - }; - - let mut attrs = vec![attributes::inline()]; - - if signature.must_use() && - ctx.options().rust_features().must_use_function - { - attrs.push(attributes::must_use()); - } - - let name = ctx.rust_ident(&name); - methods.push(quote! { - #(#attrs)* - pub unsafe fn #name ( #( #args ),* ) #ret { - #block - } - }); - } -} - -/// A helper type that represents different enum variations. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum EnumVariation { - /// The code for this enum will use a Rust enum. Note that creating this in unsafe code - /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not - /// its marked as non_exhaustive. 
- Rust { - /// Indicates whether the generated struct should be `#[non_exhaustive]` - non_exhaustive: bool, - }, - /// The code for this enum will use a newtype - NewType { - /// Indicates whether the newtype will have bitwise operators - is_bitfield: bool, - }, - /// The code for this enum will use consts - Consts, - /// The code for this enum will use a module containing consts - ModuleConsts, -} - -impl EnumVariation { - fn is_rust(&self) -> bool { - matches!(*self, EnumVariation::Rust { .. }) - } - - /// Both the `Const` and `ModuleConsts` variants will cause this to return - /// true. - fn is_const(&self) -> bool { - matches!(*self, EnumVariation::Consts | EnumVariation::ModuleConsts) - } -} - -impl Default for EnumVariation { - fn default() -> EnumVariation { - EnumVariation::Consts - } -} - -impl std::str::FromStr for EnumVariation { - type Err = std::io::Error; - - /// Create a `EnumVariation` from a string. - fn from_str(s: &str) -> Result { - match s { - "rust" => Ok(EnumVariation::Rust { - non_exhaustive: false, - }), - "rust_non_exhaustive" => Ok(EnumVariation::Rust { - non_exhaustive: true, - }), - "bitfield" => Ok(EnumVariation::NewType { is_bitfield: true }), - "consts" => Ok(EnumVariation::Consts), - "moduleconsts" => Ok(EnumVariation::ModuleConsts), - "newtype" => Ok(EnumVariation::NewType { is_bitfield: false }), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid EnumVariation. Accepted values ", - "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", - "'moduleconsts', and 'newtype'." - ), - )), - } - } -} - -/// A helper type to construct different enum variations. 
-enum EnumBuilder<'a> { - Rust { - codegen_depth: usize, - attrs: Vec, - ident: Ident, - tokens: proc_macro2::TokenStream, - emitted_any_variants: bool, - }, - NewType { - codegen_depth: usize, - canonical_name: &'a str, - tokens: proc_macro2::TokenStream, - is_bitfield: bool, - }, - Consts { - repr: proc_macro2::TokenStream, - variants: Vec, - codegen_depth: usize, - }, - ModuleConsts { - codegen_depth: usize, - module_name: &'a str, - module_items: Vec, - }, -} - -impl<'a> EnumBuilder<'a> { - /// Returns the depth of the code generation for a variant of this enum. - fn codegen_depth(&self) -> usize { - match *self { - EnumBuilder::Rust { codegen_depth, .. } | - EnumBuilder::NewType { codegen_depth, .. } | - EnumBuilder::ModuleConsts { codegen_depth, .. } | - EnumBuilder::Consts { codegen_depth, .. } => codegen_depth, - } - } - - /// Returns true if the builder is for a rustified enum. - fn is_rust_enum(&self) -> bool { - matches!(*self, EnumBuilder::Rust { .. }) - } - - /// Create a new enum given an item builder, a canonical name, a name for - /// the representation, and which variation it should be generated as. - fn new( - name: &'a str, - mut attrs: Vec, - repr: proc_macro2::TokenStream, - enum_variation: EnumVariation, - enum_codegen_depth: usize, - ) -> Self { - let ident = Ident::new(name, Span::call_site()); - - match enum_variation { - EnumVariation::NewType { is_bitfield } => EnumBuilder::NewType { - codegen_depth: enum_codegen_depth, - canonical_name: name, - tokens: quote! { - #( #attrs )* - pub struct #ident (pub #repr); - }, - is_bitfield, - }, - - EnumVariation::Rust { .. } => { - // `repr` is guaranteed to be Rustified in Enum::codegen - attrs.insert(0, quote! { #[repr( #repr )] }); - let tokens = quote!(); - EnumBuilder::Rust { - codegen_depth: enum_codegen_depth + 1, - attrs, - ident, - tokens, - emitted_any_variants: false, - } - } - - EnumVariation::Consts => { - let mut variants = Vec::new(); - - variants.push(quote! 
{ - #( #attrs )* - pub type #ident = #repr; - }); - - EnumBuilder::Consts { - repr, - variants, - codegen_depth: enum_codegen_depth, - } - } - - EnumVariation::ModuleConsts => { - let ident = Ident::new( - CONSTIFIED_ENUM_MODULE_REPR_NAME, - Span::call_site(), - ); - let type_definition = quote! { - #( #attrs )* - pub type #ident = #repr; - }; - - EnumBuilder::ModuleConsts { - codegen_depth: enum_codegen_depth + 1, - module_name: name, - module_items: vec![type_definition], - } - } - } - } - - /// Add a variant to this enum. - fn with_variant<'b>( - self, - ctx: &BindgenContext, - variant: &EnumVariant, - mangling_prefix: Option<&str>, - rust_ty: proc_macro2::TokenStream, - result: &mut CodegenResult<'b>, - is_ty_named: bool, - ) -> Self { - let variant_name = ctx.rust_mangle(variant.name()); - let is_rust_enum = self.is_rust_enum(); - let expr = match variant.val() { - EnumVariantValue::Boolean(v) if is_rust_enum => { - helpers::ast_ty::uint_expr(v as u64) - } - EnumVariantValue::Boolean(v) => quote!(#v), - EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v), - EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v), - }; - - let mut doc = quote! {}; - if ctx.options().generate_comments { - if let Some(raw_comment) = variant.comment() { - let comment = - comment::preprocess(raw_comment, self.codegen_depth()); - doc = attributes::doc(comment); - } - } - - match self { - EnumBuilder::Rust { - attrs, - ident, - tokens, - emitted_any_variants: _, - codegen_depth, - } => { - let name = ctx.rust_ident(variant_name); - EnumBuilder::Rust { - attrs, - ident, - codegen_depth, - tokens: quote! { - #tokens - #doc - #name = #expr, - }, - emitted_any_variants: true, - } - } - - EnumBuilder::NewType { canonical_name, .. } => { - if ctx.options().rust_features().associated_const && is_ty_named - { - let enum_ident = ctx.rust_ident(canonical_name); - let variant_ident = ctx.rust_ident(variant_name); - result.push(quote! 
{ - impl #enum_ident { - #doc - pub const #variant_ident : #rust_ty = #rust_ty ( #expr ); - } - }); - } else { - let ident = ctx.rust_ident(match mangling_prefix { - Some(prefix) => { - Cow::Owned(format!("{}_{}", prefix, variant_name)) - } - None => variant_name, - }); - result.push(quote! { - #doc - pub const #ident : #rust_ty = #rust_ty ( #expr ); - }); - } - - self - } - - EnumBuilder::Consts { ref repr, .. } => { - let constant_name = match mangling_prefix { - Some(prefix) => { - Cow::Owned(format!("{}_{}", prefix, variant_name)) - } - None => variant_name, - }; - - let ty = if is_ty_named { &rust_ty } else { repr }; - - let ident = ctx.rust_ident(constant_name); - result.push(quote! { - #doc - pub const #ident : #ty = #expr ; - }); - - self - } - EnumBuilder::ModuleConsts { - codegen_depth, - module_name, - mut module_items, - } => { - let name = ctx.rust_ident(variant_name); - let ty = ctx.rust_ident(CONSTIFIED_ENUM_MODULE_REPR_NAME); - module_items.push(quote! { - #doc - pub const #name : #ty = #expr ; - }); - - EnumBuilder::ModuleConsts { - module_name, - module_items, - codegen_depth, - } - } - } - } - - fn build<'b>( - self, - ctx: &BindgenContext, - rust_ty: proc_macro2::TokenStream, - result: &mut CodegenResult<'b>, - ) -> proc_macro2::TokenStream { - match self { - EnumBuilder::Rust { - attrs, - ident, - tokens, - emitted_any_variants, - .. - } => { - let variants = if !emitted_any_variants { - quote!(__bindgen_cannot_repr_c_on_empty_enum = 0) - } else { - tokens - }; - - quote! { - #( #attrs )* - pub enum #ident { - #variants - } - } - } - EnumBuilder::NewType { - canonical_name, - tokens, - is_bitfield, - .. - } => { - if !is_bitfield { - return tokens; - } - - let rust_ty_name = ctx.rust_ident_raw(canonical_name); - let prefix = ctx.trait_prefix(); - - result.push(quote! 
{ - impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty { - type Output = Self; - - #[inline] - fn bitor(self, other: Self) -> Self { - #rust_ty_name(self.0 | other.0) - } - } - }); - - result.push(quote! { - impl ::#prefix::ops::BitOrAssign for #rust_ty { - #[inline] - fn bitor_assign(&mut self, rhs: #rust_ty) { - self.0 |= rhs.0; - } - } - }); - - result.push(quote! { - impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty { - type Output = Self; - - #[inline] - fn bitand(self, other: Self) -> Self { - #rust_ty_name(self.0 & other.0) - } - } - }); - - result.push(quote! { - impl ::#prefix::ops::BitAndAssign for #rust_ty { - #[inline] - fn bitand_assign(&mut self, rhs: #rust_ty) { - self.0 &= rhs.0; - } - } - }); - - tokens - } - EnumBuilder::Consts { variants, .. } => quote! { #( #variants )* }, - EnumBuilder::ModuleConsts { - module_items, - module_name, - .. - } => { - let ident = ctx.rust_ident(module_name); - quote! { - pub mod #ident { - #( #module_items )* - } - } - } - } - } -} - -impl CodeGenerator for Enum { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug!("::codegen: item = {:?}", item); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(&name); - let enum_ty = item.expect_type(); - let layout = enum_ty.layout(ctx); - let variation = self.computed_enum_variation(ctx, item); - - let repr_translated; - let repr = match self.repr().map(|repr| ctx.resolve_type(repr)) { - Some(repr) - if !ctx.options().translate_enum_integer_types && - !variation.is_rust() => - { - repr - } - repr => { - // An enum's integer type is translated to a native Rust - // integer type in 3 cases: - // * the enum is Rustified and we need a translated type for - // the repr attribute - // * the representation couldn't be determined from the C source - // * it was explicitly requested as a bindgen option - - let 
kind = match repr { - Some(repr) => match *repr.canonical_type(ctx).kind() { - TypeKind::Int(int_kind) => int_kind, - _ => panic!("Unexpected type as enum repr"), - }, - None => { - warn!( - "Guessing type of enum! Forward declarations of enums \ - shouldn't be legal!" - ); - IntKind::Int - } - }; - - let signed = kind.is_signed(); - let size = layout - .map(|l| l.size) - .or_else(|| kind.known_size()) - .unwrap_or(0); - - let translated = match (signed, size) { - (true, 1) => IntKind::I8, - (false, 1) => IntKind::U8, - (true, 2) => IntKind::I16, - (false, 2) => IntKind::U16, - (true, 4) => IntKind::I32, - (false, 4) => IntKind::U32, - (true, 8) => IntKind::I64, - (false, 8) => IntKind::U64, - _ => { - warn!( - "invalid enum decl: signed: {}, size: {}", - signed, size - ); - IntKind::I32 - } - }; - - repr_translated = - Type::new(None, None, TypeKind::Int(translated), false); - &repr_translated - } - }; - - let mut attrs = vec![]; - - // TODO(emilio): Delegate this to the builders? - match variation { - EnumVariation::Rust { non_exhaustive } => { - if non_exhaustive && - ctx.options().rust_features().non_exhaustive - { - attrs.push(attributes::non_exhaustive()); - } else if non_exhaustive && - !ctx.options().rust_features().non_exhaustive - { - panic!("The rust target you're using doesn't seem to support non_exhaustive enums"); - } - } - EnumVariation::NewType { .. } => { - if ctx.options().rust_features.repr_transparent { - attrs.push(attributes::repr("transparent")); - } else { - attrs.push(attributes::repr("C")); - } - } - _ => {} - }; - - if let Some(comment) = item.comment(ctx) { - attrs.push(attributes::doc(comment)); - } - - if item.annotations().must_use_type() || ctx.must_use_type_by_name(item) - { - attrs.push(attributes::must_use()); - } - - if !variation.is_const() { - let mut derives = derives_of_item(item, ctx); - // For backwards compat, enums always derive Debug/Clone/Eq/PartialEq/Hash, even - // if we don't generate those by default. 
- if !item.annotations().disallow_debug() { - derives.insert(DerivableTraits::DEBUG); - } - if !item.annotations().disallow_copy() { - derives.insert(DerivableTraits::COPY); - } - derives.insert( - DerivableTraits::CLONE | - DerivableTraits::HASH | - DerivableTraits::PARTIAL_EQ | - DerivableTraits::EQ, - ); - let mut derives: Vec<_> = derives.into(); - for derive in item.annotations().derives().iter() { - if !derives.contains(&derive.as_str()) { - derives.push(derive); - } - } - - // The custom derives callback may return a list of derive attributes; - // add them to the end of the list. - let custom_derives; - if let Some(cb) = &ctx.options().parse_callbacks { - custom_derives = cb.add_derives(&name); - // In most cases this will be a no-op, since custom_derives will be empty. - derives.extend(custom_derives.iter().map(|s| s.as_str())); - }; - - attrs.push(attributes::derives(&derives)); - } - - fn add_constant<'a>( - ctx: &BindgenContext, - enum_: &Type, - // Only to avoid recomputing every time. - enum_canonical_name: &Ident, - // May be the same as "variant" if it's because the - // enum is unnamed and we still haven't seen the - // value. - variant_name: &Ident, - referenced_name: &Ident, - enum_rust_ty: proc_macro2::TokenStream, - result: &mut CodegenResult<'a>, - ) { - let constant_name = if enum_.name().is_some() { - if ctx.options().prepend_enum_name { - format!("{}_{}", enum_canonical_name, variant_name) - } else { - format!("{}", variant_name) - } - } else { - format!("{}", variant_name) - }; - let constant_name = ctx.rust_ident(constant_name); - - result.push(quote! { - pub const #constant_name : #enum_rust_ty = - #enum_canonical_name :: #referenced_name ; - }); - } - - let repr = repr.to_rust_ty_or_opaque(ctx, item); - - let mut builder = EnumBuilder::new( - &name, - attrs, - repr, - variation, - item.codegen_depth(ctx), - ); - - // A map where we keep a value -> variant relation. 
- let mut seen_values = HashMap::<_, Ident>::default(); - let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); - let is_toplevel = item.is_toplevel(ctx); - - // Used to mangle the constants we generate in the unnamed-enum case. - let parent_canonical_name = if is_toplevel { - None - } else { - Some(item.parent_id().canonical_name(ctx)) - }; - - let constant_mangling_prefix = if ctx.options().prepend_enum_name { - if enum_ty.name().is_none() { - parent_canonical_name.as_deref() - } else { - Some(&*name) - } - } else { - None - }; - - // NB: We defer the creation of constified variants, in case we find - // another variant with the same value (which is the common thing to - // do). - let mut constified_variants = VecDeque::new(); - - let mut iter = self.variants().iter().peekable(); - while let Some(variant) = - iter.next().or_else(|| constified_variants.pop_front()) - { - if variant.hidden() { - continue; - } - - if variant.force_constification() && iter.peek().is_some() { - constified_variants.push_back(variant); - continue; - } - - match seen_values.entry(variant.val()) { - Entry::Occupied(ref entry) => { - if variation.is_rust() { - let variant_name = ctx.rust_mangle(variant.name()); - let mangled_name = - if is_toplevel || enum_ty.name().is_some() { - variant_name - } else { - let parent_name = - parent_canonical_name.as_ref().unwrap(); - - Cow::Owned(format!( - "{}_{}", - parent_name, variant_name - )) - }; - - let existing_variant_name = entry.get(); - // Use associated constants for named enums. - if enum_ty.name().is_some() && - ctx.options().rust_features().associated_const - { - let enum_canonical_name = &ident; - let variant_name = - ctx.rust_ident_raw(&*mangled_name); - result.push(quote! 
{ - impl #enum_rust_ty { - pub const #variant_name : #enum_rust_ty = - #enum_canonical_name :: #existing_variant_name ; - } - }); - } else { - add_constant( - ctx, - enum_ty, - &ident, - &Ident::new(&*mangled_name, Span::call_site()), - existing_variant_name, - enum_rust_ty.clone(), - result, - ); - } - } else { - builder = builder.with_variant( - ctx, - variant, - constant_mangling_prefix, - enum_rust_ty.clone(), - result, - enum_ty.name().is_some(), - ); - } - } - Entry::Vacant(entry) => { - builder = builder.with_variant( - ctx, - variant, - constant_mangling_prefix, - enum_rust_ty.clone(), - result, - enum_ty.name().is_some(), - ); - - let variant_name = ctx.rust_ident(variant.name()); - - // If it's an unnamed enum, or constification is enforced, - // we also generate a constant so it can be properly - // accessed. - if (variation.is_rust() && enum_ty.name().is_none()) || - variant.force_constification() - { - let mangled_name = if is_toplevel { - variant_name.clone() - } else { - let parent_name = - parent_canonical_name.as_ref().unwrap(); - - Ident::new( - &format!("{}_{}", parent_name, variant_name), - Span::call_site(), - ) - }; - - add_constant( - ctx, - enum_ty, - &ident, - &mangled_name, - &variant_name, - enum_rust_ty.clone(), - result, - ); - } - - entry.insert(variant_name); - } - } - } - - let item = builder.build(ctx, enum_rust_ty, result); - result.push(item); - } -} - -/// Enum for the default type of macro constants. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum MacroTypeVariation { - /// Use i32 or i64 - Signed, - /// Use u32 or u64 - Unsigned, -} - -impl MacroTypeVariation { - /// Convert a `MacroTypeVariation` to its str representation. 
- pub fn as_str(&self) -> &str { - match self { - MacroTypeVariation::Signed => "signed", - MacroTypeVariation::Unsigned => "unsigned", - } - } -} - -impl Default for MacroTypeVariation { - fn default() -> MacroTypeVariation { - MacroTypeVariation::Unsigned - } -} - -impl std::str::FromStr for MacroTypeVariation { - type Err = std::io::Error; - - /// Create a `MacroTypeVariation` from a string. - fn from_str(s: &str) -> Result { - match s { - "signed" => Ok(MacroTypeVariation::Signed), - "unsigned" => Ok(MacroTypeVariation::Unsigned), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid MacroTypeVariation. Accepted values ", - "are 'signed' and 'unsigned'" - ), - )), - } - } -} - -/// Enum for how aliases should be translated. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum AliasVariation { - /// Convert to regular Rust alias - TypeAlias, - /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] - NewType, - /// Same as NewStruct but also impl Deref to be able to use the methods of the wrapped type - NewTypeDeref, -} - -impl AliasVariation { - /// Convert an `AliasVariation` to its str representation. - pub fn as_str(&self) -> &str { - match self { - AliasVariation::TypeAlias => "type_alias", - AliasVariation::NewType => "new_type", - AliasVariation::NewTypeDeref => "new_type_deref", - } - } -} - -impl Default for AliasVariation { - fn default() -> AliasVariation { - AliasVariation::TypeAlias - } -} - -impl std::str::FromStr for AliasVariation { - type Err = std::io::Error; - - /// Create an `AliasVariation` from a string. - fn from_str(s: &str) -> Result { - match s { - "type_alias" => Ok(AliasVariation::TypeAlias), - "new_type" => Ok(AliasVariation::NewType), - "new_type_deref" => Ok(AliasVariation::NewTypeDeref), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid AliasVariation. 
Accepted values ", - "are 'type_alias', 'new_type', and 'new_type_deref'" - ), - )), - } - } -} - -/// Fallible conversion to an opaque blob. -/// -/// Implementors of this trait should provide the `try_get_layout` method to -/// fallibly get this thing's layout, which the provided `try_to_opaque` trait -/// method will use to convert the `Layout` into an opaque blob Rust type. -trait TryToOpaque { - type Extra; - - /// Get the layout for this thing, if one is available. - fn try_get_layout( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result; - - /// Do not override this provided trait method. - fn try_to_opaque( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result { - self.try_get_layout(ctx, extra) - .map(|layout| helpers::blob(ctx, layout)) - } -} - -/// Infallible conversion of an IR thing to an opaque blob. -/// -/// The resulting layout is best effort, and is unfortunately not guaranteed to -/// be correct. When all else fails, we fall back to a single byte layout as a -/// last resort, because C++ does not permit zero-sized types. See the note in -/// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits -/// and when each is appropriate. -/// -/// Don't implement this directly. Instead implement `TryToOpaque`, and then -/// leverage the blanket impl for this trait. -trait ToOpaque: TryToOpaque { - fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout { - self.try_get_layout(ctx, extra) - .unwrap_or_else(|_| Layout::for_size(ctx, 1)) - } - - fn to_opaque( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> proc_macro2::TokenStream { - let layout = self.get_layout(ctx, extra); - helpers::blob(ctx, layout) - } -} - -impl ToOpaque for T where T: TryToOpaque {} - -/// Fallible conversion from an IR thing to an *equivalent* Rust type. 
-/// -/// If the C/C++ construct represented by the IR thing cannot (currently) be -/// represented in Rust (for example, instantiations of templates with -/// const-value generic parameters) then the impl should return an `Err`. It -/// should *not* attempt to return an opaque blob with the correct size and -/// alignment. That is the responsibility of the `TryToOpaque` trait. -trait TryToRustTy { - type Extra; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result; -} - -/// Fallible conversion to a Rust type or an opaque blob with the correct size -/// and alignment. -/// -/// Don't implement this directly. Instead implement `TryToRustTy` and -/// `TryToOpaque`, and then leverage the blanket impl for this trait below. -trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque { - type Extra; - - fn try_to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &::Extra, - ) -> error::Result; -} - -impl TryToRustTyOrOpaque for T -where - T: TryToRustTy + TryToOpaque, -{ - type Extra = E; - - fn try_to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &E, - ) -> error::Result { - self.try_to_rust_ty(ctx, extra).or_else(|_| { - if let Ok(layout) = self.try_get_layout(ctx, extra) { - Ok(helpers::blob(ctx, layout)) - } else { - Err(error::Error::NoLayoutForOpaqueBlob) - } - }) - } -} - -/// Infallible conversion to a Rust type, or an opaque blob with a best effort -/// of correct size and alignment. -/// -/// Don't implement this directly. Instead implement `TryToRustTy` and -/// `TryToOpaque`, and then leverage the blanket impl for this trait below. -/// -/// ### Fallible vs. Infallible Conversions to Rust Types -/// -/// When should one use this infallible `ToRustTyOrOpaque` trait versus the -/// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` triats? 
All fallible trait -/// implementations that need to convert another thing into a Rust type or -/// opaque blob in a nested manner should also use fallible trait methods and -/// propagate failure up the stack. Only infallible functions and methods like -/// CodeGenerator implementations should use the infallible -/// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely -/// we are to get a usable `Layout` even if we can't generate an equivalent Rust -/// type for a C++ construct. -trait ToRustTyOrOpaque: TryToRustTy + ToOpaque { - type Extra; - - fn to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &::Extra, - ) -> proc_macro2::TokenStream; -} - -impl ToRustTyOrOpaque for T -where - T: TryToRustTy + ToOpaque, -{ - type Extra = E; - - fn to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &E, - ) -> proc_macro2::TokenStream { - self.try_to_rust_ty(ctx, extra) - .unwrap_or_else(|_| self.to_opaque(ctx, extra)) - } -} - -impl TryToOpaque for T -where - T: Copy + Into, -{ - type Extra = (); - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - ctx.resolve_item((*self).into()).try_get_layout(ctx, &()) - } -} - -impl TryToRustTy for T -where - T: Copy + Into, -{ - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &()) - } -} - -impl TryToOpaque for Item { - type Extra = (); - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - self.kind().expect_type().try_get_layout(ctx, self) - } -} - -impl TryToRustTy for Item { - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - self.kind().expect_type().try_to_rust_ty(ctx, self) - } -} - -impl TryToOpaque for Type { - type Extra = Item; - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &Item, - ) -> error::Result { - 
self.layout(ctx).ok_or(error::Error::NoLayoutForOpaqueBlob) - } -} - -impl TryToRustTy for Type { - type Extra = Item; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - use self::helpers::ast_ty::*; - - match *self.kind() { - TypeKind::Void => Ok(c_void(ctx)), - // TODO: we should do something smart with nullptr, or maybe *const - // c_void is enough? - TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), - TypeKind::Int(ik) => { - match ik { - IntKind::Bool => Ok(quote! { bool }), - IntKind::Char { .. } => Ok(raw_type(ctx, "c_char")), - IntKind::SChar => Ok(raw_type(ctx, "c_schar")), - IntKind::UChar => Ok(raw_type(ctx, "c_uchar")), - IntKind::Short => Ok(raw_type(ctx, "c_short")), - IntKind::UShort => Ok(raw_type(ctx, "c_ushort")), - IntKind::Int => Ok(raw_type(ctx, "c_int")), - IntKind::UInt => Ok(raw_type(ctx, "c_uint")), - IntKind::Long => Ok(raw_type(ctx, "c_long")), - IntKind::ULong => Ok(raw_type(ctx, "c_ulong")), - IntKind::LongLong => Ok(raw_type(ctx, "c_longlong")), - IntKind::ULongLong => Ok(raw_type(ctx, "c_ulonglong")), - IntKind::WChar => { - let layout = self - .layout(ctx) - .expect("Couldn't compute wchar_t's layout?"); - let ty = Layout::known_type_for_size(ctx, layout.size) - .expect("Non-representable wchar_t?"); - let ident = ctx.rust_ident_raw(ty); - Ok(quote! { #ident }) - } - - IntKind::I8 => Ok(quote! { i8 }), - IntKind::U8 => Ok(quote! { u8 }), - IntKind::I16 => Ok(quote! { i16 }), - IntKind::U16 => Ok(quote! { u16 }), - IntKind::I32 => Ok(quote! { i32 }), - IntKind::U32 => Ok(quote! { u32 }), - IntKind::I64 => Ok(quote! { i64 }), - IntKind::U64 => Ok(quote! { u64 }), - IntKind::Custom { name, .. } => { - Ok(proc_macro2::TokenStream::from_str(name).unwrap()) - } - IntKind::U128 => { - Ok(if ctx.options().rust_features.i128_and_u128 { - quote! { u128 } - } else { - // Best effort thing, but wrong alignment - // unfortunately. - quote! 
{ [u64; 2] } - }) - } - IntKind::I128 => { - Ok(if ctx.options().rust_features.i128_and_u128 { - quote! { i128 } - } else { - quote! { [u64; 2] } - }) - } - } - } - TypeKind::Float(fk) => { - Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))) - } - TypeKind::Complex(fk) => { - let float_path = - float_kind_rust_type(ctx, fk, self.layout(ctx)); - - ctx.generated_bindgen_complex(); - Ok(if ctx.options().enable_cxx_namespaces { - quote! { - root::__BindgenComplex<#float_path> - } - } else { - quote! { - __BindgenComplex<#float_path> - } - }) - } - TypeKind::Function(ref fs) => { - // We can't rely on the sizeof(Option>) == - // sizeof(NonZero<_>) optimization with opaque blobs (because - // they aren't NonZero), so don't *ever* use an or_opaque - // variant here. - let ty = fs.try_to_rust_ty(ctx, &())?; - - let prefix = ctx.trait_prefix(); - Ok(quote! { - ::#prefix::option::Option<#ty> - }) - } - TypeKind::Array(item, len) | TypeKind::Vector(item, len) => { - let ty = item.try_to_rust_ty(ctx, &())?; - Ok(quote! { - [ #ty ; #len ] - }) - } - TypeKind::Enum(..) => { - let path = item.namespace_aware_canonical_path(ctx); - let path = proc_macro2::TokenStream::from_str(&path.join("::")) - .unwrap(); - Ok(quote!(#path)) - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.try_to_rust_ty(ctx, item) - } - TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()), - TypeKind::TemplateAlias(..) | - TypeKind::Alias(..) | - TypeKind::BlockPointer(..) 
=> { - if self.is_block_pointer() && !ctx.options().generate_block { - let void = c_void(ctx); - return Ok(void.to_ptr(/* is_const = */ false)); - } - - if item.is_opaque(ctx, &()) && - item.used_template_params(ctx) - .into_iter() - .any(|param| param.is_template_param(ctx, &())) - { - self.try_to_opaque(ctx, item) - } else if let Some(ty) = self - .name() - .and_then(|name| utils::type_from_named(ctx, name)) - { - Ok(ty) - } else { - utils::build_path(item, ctx) - } - } - TypeKind::Comp(ref info) => { - let template_params = item.all_template_params(ctx); - if info.has_non_type_template_params() || - (item.is_opaque(ctx, &()) && !template_params.is_empty()) - { - return self.try_to_opaque(ctx, item); - } - - utils::build_path(item, ctx) - } - TypeKind::Opaque => self.try_to_opaque(ctx, item), - TypeKind::Pointer(inner) | TypeKind::Reference(inner) => { - let is_const = ctx.resolve_type(inner).is_const(); - - let inner = - inner.into_resolver().through_type_refs().resolve(ctx); - let inner_ty = inner.expect_type(); - - let is_objc_pointer = - matches!(inner_ty.kind(), TypeKind::ObjCInterface(..)); - - // Regardless if we can properly represent the inner type, we - // should always generate a proper pointer here, so use - // infallible conversion of the inner type. - let mut ty = inner.to_rust_ty_or_opaque(ctx, &()); - ty.append_implicit_template_params(ctx, inner); - - // Avoid the first function pointer level, since it's already - // represented in Rust. - if inner_ty.canonical_type(ctx).is_function() || is_objc_pointer - { - Ok(ty) - } else { - Ok(ty.to_ptr(is_const)) - } - } - TypeKind::TypeParam => { - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(&name); - Ok(quote! { - #ident - }) - } - TypeKind::ObjCSel => Ok(quote! { - objc::runtime::Sel - }), - TypeKind::ObjCId => Ok(quote! { - id - }), - TypeKind::ObjCInterface(ref interface) => { - let name = ctx.rust_ident(interface.name()); - Ok(quote! 
{ - #name - }) - } - ref u @ TypeKind::UnresolvedTypeRef(..) => { - unreachable!("Should have been resolved after parsing {:?}!", u) - } - } - } -} - -impl TryToOpaque for TemplateInstantiation { - type Extra = Item; - - fn try_get_layout( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - item.expect_type() - .layout(ctx) - .ok_or(error::Error::NoLayoutForOpaqueBlob) - } -} - -impl TryToRustTy for TemplateInstantiation { - type Extra = Item; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - if self.is_opaque(ctx, item) { - return Err(error::Error::InstantiationOfOpaqueType); - } - - let def = self - .template_definition() - .into_resolver() - .through_type_refs() - .resolve(ctx); - - let mut ty = quote! {}; - let def_path = def.namespace_aware_canonical_path(ctx); - ty.append_separated( - def_path.into_iter().map(|p| ctx.rust_ident(p)), - quote!(::), - ); - - let def_params = def.self_template_params(ctx); - if def_params.is_empty() { - // This can happen if we generated an opaque type for a partial - // template specialization, and we've hit an instantiation of - // that partial specialization. - extra_assert!(def.is_opaque(ctx, &())); - return Err(error::Error::InstantiationOfOpaqueType); - } - - // TODO: If the definition type is a template class/struct - // definition's member template definition, it could rely on - // generic template parameters from its outer template - // class/struct. When we emit bindings for it, it could require - // *more* type arguments than we have here, and we will need to - // reconstruct them somehow. We don't have any means of doing - // that reconstruction at this time. - - let template_args = self - .template_arguments() - .iter() - .zip(def_params.iter()) - // Only pass type arguments for the type parameters that - // the def uses. 
- .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param)) - .map(|(arg, _)| { - let arg = arg.into_resolver().through_type_refs().resolve(ctx); - let mut ty = arg.try_to_rust_ty(ctx, &())?; - ty.append_implicit_template_params(ctx, arg); - Ok(ty) - }) - .collect::>>()?; - - if template_args.is_empty() { - return Ok(ty); - } - - Ok(quote! { - #ty < #( #template_args ),* > - }) - } -} - -impl TryToRustTy for FunctionSig { - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - // TODO: we might want to consider ignoring the reference return value. - let ret = utils::fnsig_return_ty(ctx, self); - let arguments = utils::fnsig_arguments(ctx, self); - let abi = self.abi(); - - match abi { - Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => { - warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target"); - Ok(proc_macro2::TokenStream::new()) - } - _ => Ok(quote! { - unsafe extern #abi fn ( #( #arguments ),* ) #ret - }), - } - } -} - -impl CodeGenerator for Function { - type Extra = Item; - - /// If we've actually generated the symbol, the number of times we've seen - /// it. - type Return = Option; - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) -> Self::Return { - debug!("::codegen: item = {:?}", item); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - // We can't currently do anything with Internal functions so just - // avoid generating anything for them. - match self.linkage() { - Linkage::Internal => return None, - Linkage::External => {} - } - - // Pure virtual methods have no actual symbol, so we can't generate - // something meaningful for them. 
- match self.kind() { - FunctionKind::Method(ref method_kind) - if method_kind.is_pure_virtual() => - { - return None; - } - _ => {} - } - - // Similar to static member variables in a class template, we can't - // generate bindings to template functions, because the set of - // instantiations is open ended and we have no way of knowing which - // monomorphizations actually exist. - if !item.all_template_params(ctx).is_empty() { - return None; - } - - let name = self.name(); - let mut canonical_name = item.canonical_name(ctx); - let mangled_name = self.mangled_name(); - - { - let seen_symbol_name = mangled_name.unwrap_or(&canonical_name); - - // TODO: Maybe warn here if there's a type/argument mismatch, or - // something? - if result.seen_function(seen_symbol_name) { - return None; - } - result.saw_function(seen_symbol_name); - } - - let signature_item = ctx.resolve_item(self.signature()); - let signature = signature_item.kind().expect_type().canonical_type(ctx); - let signature = match *signature.kind() { - TypeKind::Function(ref sig) => sig, - _ => panic!("Signature kind is not a Function: {:?}", signature), - }; - - let args = utils::fnsig_arguments(ctx, signature); - let ret = utils::fnsig_return_ty(ctx, signature); - - let mut attributes = vec![]; - - if signature.must_use() && - ctx.options().rust_features().must_use_function - { - attributes.push(attributes::must_use()); - } - - if let Some(comment) = item.comment(ctx) { - attributes.push(attributes::doc(comment)); - } - - let abi = match signature.abi() { - Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => { - warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target"); - return None; - } - Abi::Win64 if signature.is_variadic() => { - warn!("Skipping variadic function with Win64 ABI that isn't supported"); - return None; - } - Abi::Unknown(unknown_abi) => { - panic!( - "Invalid or unknown abi {:?} for function {:?} ({:?})", - unknown_abi, canonical_name, self 
- ); - } - abi => abi, - }; - - // Handle overloaded functions by giving each overload its own unique - // suffix. - let times_seen = result.overload_number(&canonical_name); - if times_seen > 0 { - write!(&mut canonical_name, "{}", times_seen).unwrap(); - } - - let link_name = mangled_name.unwrap_or(name); - if !utils::names_will_be_identical_after_mangling( - &canonical_name, - link_name, - Some(abi), - ) { - attributes.push(attributes::link_name(link_name)); - } - - // Unfortunately this can't piggyback on the `attributes` list because - // the #[link(wasm_import_module)] needs to happen before the `extern - // "C"` block. It doesn't get picked up properly otherwise - let wasm_link_attribute = - ctx.options().wasm_import_module_name.as_ref().map(|name| { - quote! { #[link(wasm_import_module = #name)] } - }); - - let ident = ctx.rust_ident(canonical_name); - let tokens = quote! { - #wasm_link_attribute - extern #abi { - #(#attributes)* - pub fn #ident ( #( #args ),* ) #ret; - } - }; - - // If we're doing dynamic binding generation, add to the dynamic items. - if ctx.options().dynamic_library_name.is_some() && - self.kind() == FunctionKind::Function - { - let args_identifiers = - utils::fnsig_argument_identifiers(ctx, signature); - let return_item = ctx.resolve_item(signature.return_type()); - let ret_ty = match *return_item.kind().expect_type().kind() { - TypeKind::Void => quote! 
{()}, - _ => return_item.to_rust_ty_or_opaque(ctx, &()), - }; - result.dynamic_items().push( - ident, - abi, - signature.is_variadic(), - ctx.options().dynamic_link_require_all, - args, - args_identifiers, - ret, - ret_ty, - ); - } else { - result.push(tokens); - } - Some(times_seen) - } -} - -fn objc_method_codegen( - ctx: &BindgenContext, - method: &ObjCMethod, - class_name: Option<&str>, - prefix: &str, -) -> proc_macro2::TokenStream { - let signature = method.signature(); - let fn_args = utils::fnsig_arguments(ctx, signature); - let fn_ret = utils::fnsig_return_ty(ctx, signature); - - let sig = if method.is_class_method() { - let fn_args = fn_args.clone(); - quote! { - ( #( #fn_args ),* ) #fn_ret - } - } else { - let fn_args = fn_args.clone(); - let args = iter::once(quote! { &self }).chain(fn_args.into_iter()); - quote! { - ( #( #args ),* ) #fn_ret - } - }; - - let methods_and_args = method.format_method_call(&fn_args); - - let body = if method.is_class_method() { - let class_name = ctx.rust_ident( - class_name - .expect("Generating a class method without class name?") - .to_owned(), - ); - quote! { - msg_send!(class!(#class_name), #methods_and_args) - } - } else { - quote! { - msg_send!(*self, #methods_and_args) - } - }; - - let method_name = - ctx.rust_ident(format!("{}{}", prefix, method.rust_name())); - - quote! 
{ - unsafe fn #method_name #sig where ::Target: objc::Message + Sized { - #body - } - } -} - -impl CodeGenerator for ObjCInterface { - type Extra = Item; - type Return = (); - - fn codegen<'a>( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'a>, - item: &Item, - ) { - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let mut impl_items = vec![]; - - for method in self.methods() { - let impl_item = objc_method_codegen(ctx, method, None, ""); - impl_items.push(impl_item); - } - - for class_method in self.class_methods() { - let ambiquity = self - .methods() - .iter() - .map(|m| m.rust_name()) - .any(|x| x == class_method.rust_name()); - let prefix = if ambiquity { "class_" } else { "" }; - let impl_item = objc_method_codegen( - ctx, - class_method, - Some(self.name()), - prefix, - ); - impl_items.push(impl_item); - } - - let trait_name = ctx.rust_ident(self.rust_name()); - let trait_constraints = quote! { - Sized + std::ops::Deref - }; - let trait_block = if self.is_template() { - let template_names: Vec = self - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - - quote! { - pub trait #trait_name <#(#template_names),*> : #trait_constraints { - #( #impl_items )* - } - } - } else { - quote! { - pub trait #trait_name : #trait_constraints { - #( #impl_items )* - } - } - }; - - let class_name = ctx.rust_ident(self.name()); - if !self.is_category() && !self.is_protocol() { - let struct_block = quote! 
{ - #[repr(transparent)] - #[derive(Clone)] - pub struct #class_name(pub id); - impl std::ops::Deref for #class_name { - type Target = objc::runtime::Object; - fn deref(&self) -> &Self::Target { - unsafe { - &*self.0 - } - } - } - unsafe impl objc::Message for #class_name { } - impl #class_name { - pub fn alloc() -> Self { - Self(unsafe { - msg_send!(objc::class!(#class_name), alloc) - }) - } - } - }; - result.push(struct_block); - let mut protocol_set: HashSet = Default::default(); - for protocol_id in self.conforms_to.iter() { - protocol_set.insert(*protocol_id); - let protocol_name = ctx.rust_ident( - ctx.resolve_type(protocol_id.expect_type_id(ctx)) - .name() - .unwrap(), - ); - let impl_trait = quote! { - impl #protocol_name for #class_name { } - }; - result.push(impl_trait); - } - let mut parent_class = self.parent_class; - while let Some(parent_id) = parent_class { - let parent = parent_id - .expect_type_id(ctx) - .into_resolver() - .through_type_refs() - .resolve(ctx) - .expect_type() - .kind(); - - let parent = match parent { - TypeKind::ObjCInterface(ref parent) => parent, - _ => break, - }; - parent_class = parent.parent_class; - - let parent_name = ctx.rust_ident(parent.rust_name()); - let impl_trait = if parent.is_template() { - let template_names: Vec = parent - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - quote! { - impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { - } - } - } else { - quote! { - impl #parent_name for #class_name { } - } - }; - result.push(impl_trait); - for protocol_id in parent.conforms_to.iter() { - if protocol_set.insert(*protocol_id) { - let protocol_name = ctx.rust_ident( - ctx.resolve_type(protocol_id.expect_type_id(ctx)) - .name() - .unwrap(), - ); - let impl_trait = quote! 
{ - impl #protocol_name for #class_name { } - }; - result.push(impl_trait); - } - } - if !parent.is_template() { - let parent_struct_name = parent.name(); - let child_struct_name = self.name(); - let parent_struct = ctx.rust_ident(parent_struct_name); - let from_block = quote! { - impl From<#class_name> for #parent_struct { - fn from(child: #class_name) -> #parent_struct { - #parent_struct(child.0) - } - } - }; - result.push(from_block); - - let error_msg = format!( - "This {} cannot be downcasted to {}", - parent_struct_name, child_struct_name - ); - let try_into_block = quote! { - impl std::convert::TryFrom<#parent_struct> for #class_name { - type Error = &'static str; - fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> { - let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))}; - if is_kind_of { - Ok(#class_name(parent.0)) - } else { - Err(#error_msg) - } - } - } - }; - result.push(try_into_block); - } - } - } - - if !self.is_protocol() { - let impl_block = if self.is_template() { - let template_names: Vec = self - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - quote! { - impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { - } - } - } else { - quote! 
{ - impl #trait_name for #class_name { - } - } - }; - result.push(impl_block); - } - - result.push(trait_block); - result.saw_objc(); - } -} - -pub(crate) fn codegen( - context: BindgenContext, -) -> (Vec, BindgenOptions) { - context.gen(|context| { - let _t = context.timer("codegen"); - let counter = Cell::new(0); - let mut result = CodegenResult::new(&counter); - - debug!("codegen: {:?}", context.options()); - - if context.options().emit_ir { - let codegen_items = context.codegen_items(); - for (id, item) in context.items() { - if codegen_items.contains(&id) { - println!("ir: {:?} = {:#?}", id, item); - } - } - } - - if let Some(path) = context.options().emit_ir_graphviz.as_ref() { - match dot::write_dot_file(context, path) { - Ok(()) => info!( - "Your dot file was generated successfully into: {}", - path - ), - Err(e) => warn!("{}", e), - } - } - - if let Some(spec) = context.options().depfile.as_ref() { - match spec.write(context.deps()) { - Ok(()) => info!( - "Your depfile was generated successfully into: {}", - spec.depfile_path.display() - ), - Err(e) => warn!("{}", e), - } - } - - context.resolve_item(context.root_module()).codegen( - context, - &mut result, - &(), - ); - - if let Some(ref lib_name) = context.options().dynamic_library_name { - let lib_ident = context.rust_ident(lib_name); - let dynamic_items_tokens = - result.dynamic_items().get_tokens(lib_ident); - result.push(dynamic_items_tokens); - } - - result.items - }) -} - -pub mod utils { - use super::{error, ToRustTyOrOpaque}; - use crate::ir::context::BindgenContext; - use crate::ir::function::{Abi, FunctionSig}; - use crate::ir::item::{Item, ItemCanonicalPath}; - use crate::ir::ty::TypeKind; - use proc_macro2; - use std::borrow::Cow; - use std::mem; - use std::str::FromStr; - - pub fn prepend_bitfield_unit_type( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let bitfield_unit_src = include_str!("./bitfield_unit.rs"); - let bitfield_unit_src = if ctx.options().rust_features().min_const_fn { 
- Cow::Borrowed(bitfield_unit_src) - } else { - Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) - }; - let bitfield_unit_type = - proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); - let bitfield_unit_type = quote!(#bitfield_unit_type); - - let items = vec![bitfield_unit_type]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub fn prepend_objc_header( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let use_objc = if ctx.options().objc_extern_crate { - quote! { - #[macro_use] - extern crate objc; - } - } else { - quote! { - use objc; - } - }; - - let id_type = quote! { - #[allow(non_camel_case_types)] - pub type id = *mut objc::runtime::Object; - }; - - let items = vec![use_objc, id_type]; - let old_items = mem::replace(result, items); - result.extend(old_items.into_iter()); - } - - pub fn prepend_block_header( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let use_block = if ctx.options().block_extern_crate { - quote! { - extern crate block; - } - } else { - quote! { - use block; - } - }; - - let items = vec![use_block]; - let old_items = mem::replace(result, items); - result.extend(old_items.into_iter()); - } - - pub fn prepend_union_types( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let prefix = ctx.trait_prefix(); - - // If the target supports `const fn`, declare eligible functions - // as `const fn` else just `fn`. - let const_fn = if ctx.options().rust_features().min_const_fn { - quote! { const fn } - } else { - quote! { fn } - }; - - // TODO(emilio): The fmt::Debug impl could be way nicer with - // std::intrinsics::type_name, but... - let union_field_decl = quote! { - #[repr(C)] - pub struct __BindgenUnionField(::#prefix::marker::PhantomData); - }; - - let union_field_impl = quote! 
{ - impl __BindgenUnionField { - #[inline] - pub #const_fn new() -> Self { - __BindgenUnionField(::#prefix::marker::PhantomData) - } - - #[inline] - pub unsafe fn as_ref(&self) -> &T { - ::#prefix::mem::transmute(self) - } - - #[inline] - pub unsafe fn as_mut(&mut self) -> &mut T { - ::#prefix::mem::transmute(self) - } - } - }; - - let union_field_default_impl = quote! { - impl ::#prefix::default::Default for __BindgenUnionField { - #[inline] - fn default() -> Self { - Self::new() - } - } - }; - - let union_field_clone_impl = quote! { - impl ::#prefix::clone::Clone for __BindgenUnionField { - #[inline] - fn clone(&self) -> Self { - Self::new() - } - } - }; - - let union_field_copy_impl = quote! { - impl ::#prefix::marker::Copy for __BindgenUnionField {} - }; - - let union_field_debug_impl = quote! { - impl ::#prefix::fmt::Debug for __BindgenUnionField { - fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) - -> ::#prefix::fmt::Result { - fmt.write_str("__BindgenUnionField") - } - } - }; - - // The actual memory of the filed will be hashed, so that's why these - // field doesn't do anything with the hash. - let union_field_hash_impl = quote! { - impl ::#prefix::hash::Hash for __BindgenUnionField { - fn hash(&self, _state: &mut H) { - } - } - }; - - let union_field_partialeq_impl = quote! { - impl ::#prefix::cmp::PartialEq for __BindgenUnionField { - fn eq(&self, _other: &__BindgenUnionField) -> bool { - true - } - } - }; - - let union_field_eq_impl = quote! 
{ - impl ::#prefix::cmp::Eq for __BindgenUnionField { - } - }; - - let items = vec![ - union_field_decl, - union_field_impl, - union_field_default_impl, - union_field_clone_impl, - union_field_copy_impl, - union_field_debug_impl, - union_field_hash_impl, - union_field_partialeq_impl, - union_field_eq_impl, - ]; - - let old_items = mem::replace(result, items); - result.extend(old_items.into_iter()); - } - - pub fn prepend_incomplete_array_types( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let prefix = ctx.trait_prefix(); - - // If the target supports `const fn`, declare eligible functions - // as `const fn` else just `fn`. - let const_fn = if ctx.options().rust_features().min_const_fn { - quote! { const fn } - } else { - quote! { fn } - }; - - let incomplete_array_decl = quote! { - #[repr(C)] - #[derive(Default)] - pub struct __IncompleteArrayField( - ::#prefix::marker::PhantomData, [T; 0]); - }; - - let incomplete_array_impl = quote! { - impl __IncompleteArrayField { - #[inline] - pub #const_fn new() -> Self { - __IncompleteArrayField(::#prefix::marker::PhantomData, []) - } - - #[inline] - pub fn as_ptr(&self) -> *const T { - self as *const _ as *const T - } - - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - self as *mut _ as *mut T - } - - #[inline] - pub unsafe fn as_slice(&self, len: usize) -> &[T] { - ::#prefix::slice::from_raw_parts(self.as_ptr(), len) - } - - #[inline] - pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { - ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) - } - } - }; - - let incomplete_array_debug_impl = quote! 
{ - impl ::#prefix::fmt::Debug for __IncompleteArrayField { - fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) - -> ::#prefix::fmt::Result { - fmt.write_str("__IncompleteArrayField") - } - } - }; - - let items = vec![ - incomplete_array_decl, - incomplete_array_impl, - incomplete_array_debug_impl, - ]; - - let old_items = mem::replace(result, items); - result.extend(old_items.into_iter()); - } - - pub fn prepend_complex_type(result: &mut Vec) { - let complex_type = quote! { - #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] - #[repr(C)] - pub struct __BindgenComplex { - pub re: T, - pub im: T - } - }; - - let items = vec![complex_type]; - let old_items = mem::replace(result, items); - result.extend(old_items.into_iter()); - } - - pub fn build_path( - item: &Item, - ctx: &BindgenContext, - ) -> error::Result { - let path = item.namespace_aware_canonical_path(ctx); - let tokens = - proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); - - Ok(tokens) - } - - fn primitive_ty( - ctx: &BindgenContext, - name: &str, - ) -> proc_macro2::TokenStream { - let ident = ctx.rust_ident_raw(name); - quote! { - #ident - } - } - - pub fn type_from_named( - ctx: &BindgenContext, - name: &str, - ) -> Option { - // FIXME: We could use the inner item to check this is really a - // primitive type but, who the heck overrides these anyway? 
- Some(match name { - "int8_t" => primitive_ty(ctx, "i8"), - "uint8_t" => primitive_ty(ctx, "u8"), - "int16_t" => primitive_ty(ctx, "i16"), - "uint16_t" => primitive_ty(ctx, "u16"), - "int32_t" => primitive_ty(ctx, "i32"), - "uint32_t" => primitive_ty(ctx, "u32"), - "int64_t" => primitive_ty(ctx, "i64"), - "uint64_t" => primitive_ty(ctx, "u64"), - - "size_t" if ctx.options().size_t_is_usize => { - primitive_ty(ctx, "usize") - } - "uintptr_t" => primitive_ty(ctx, "usize"), - - "ssize_t" if ctx.options().size_t_is_usize => { - primitive_ty(ctx, "isize") - } - "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), - _ => return None, - }) - } - - pub fn fnsig_return_ty( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> proc_macro2::TokenStream { - let return_item = ctx.resolve_item(sig.return_type()); - if let TypeKind::Void = *return_item.kind().expect_type().kind() { - quote! {} - } else { - let ret_ty = return_item.to_rust_ty_or_opaque(ctx, &()); - quote! { - -> #ret_ty - } - } - } - - pub fn fnsig_arguments( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> Vec { - use super::ToPtr; - - let mut unnamed_arguments = 0; - let mut args = sig - .argument_types() - .iter() - .map(|&(ref name, ty)| { - let arg_item = ctx.resolve_item(ty); - let arg_ty = arg_item.kind().expect_type(); - - // From the C90 standard[1]: - // - // A declaration of a parameter as "array of type" shall be - // adjusted to "qualified pointer to type", where the type - // qualifiers (if any) are those specified within the [ and ] of - // the array type derivation. 
- // - // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html - let arg_ty = match *arg_ty.canonical_type(ctx).kind() { - TypeKind::Array(t, _) => { - let stream = - if ctx.options().array_pointers_in_arguments { - arg_ty.to_rust_ty_or_opaque(ctx, arg_item) - } else { - t.to_rust_ty_or_opaque(ctx, &()) - }; - stream.to_ptr(ctx.resolve_type(t).is_const()) - } - TypeKind::Pointer(inner) => { - let inner = ctx.resolve_item(inner); - let inner_ty = inner.expect_type(); - if let TypeKind::ObjCInterface(ref interface) = - *inner_ty.canonical_type(ctx).kind() - { - let name = ctx.rust_ident(interface.name()); - quote! { - #name - } - } else { - arg_item.to_rust_ty_or_opaque(ctx, &()) - } - } - _ => arg_item.to_rust_ty_or_opaque(ctx, &()), - }; - - let arg_name = match *name { - Some(ref name) => ctx.rust_mangle(name).into_owned(), - None => { - unnamed_arguments += 1; - format!("arg{}", unnamed_arguments) - } - }; - - assert!(!arg_name.is_empty()); - let arg_name = ctx.rust_ident(arg_name); - - quote! { - #arg_name : #arg_ty - } - }) - .collect::>(); - - if sig.is_variadic() { - args.push(quote! { ... }) - } - - args - } - - pub fn fnsig_argument_identifiers( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> Vec { - let mut unnamed_arguments = 0; - let args = sig - .argument_types() - .iter() - .map(|&(ref name, _ty)| { - let arg_name = match *name { - Some(ref name) => ctx.rust_mangle(name).into_owned(), - None => { - unnamed_arguments += 1; - format!("arg{}", unnamed_arguments) - } - }; - - assert!(!arg_name.is_empty()); - let arg_name = ctx.rust_ident(arg_name); - - quote! 
{ - #arg_name - } - }) - .collect::>(); - - args - } - - pub fn fnsig_block( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> proc_macro2::TokenStream { - let args = sig.argument_types().iter().map(|&(_, ty)| { - let arg_item = ctx.resolve_item(ty); - - arg_item.to_rust_ty_or_opaque(ctx, &()) - }); - - let return_item = ctx.resolve_item(sig.return_type()); - let ret_ty = - if let TypeKind::Void = *return_item.kind().expect_type().kind() { - quote! { () } - } else { - return_item.to_rust_ty_or_opaque(ctx, &()) - }; - - quote! { - *const ::block::Block<(#(#args,)*), #ret_ty> - } - } - - // Returns true if `canonical_name` will end up as `mangled_name` at the - // machine code level, i.e. after LLVM has applied any target specific - // mangling. - pub fn names_will_be_identical_after_mangling( - canonical_name: &str, - mangled_name: &str, - call_conv: Option, - ) -> bool { - // If the mangled name and the canonical name are the same then no - // mangling can have happened between the two versions. - if canonical_name == mangled_name { - return true; - } - - // Working with &[u8] makes indexing simpler than with &str - let canonical_name = canonical_name.as_bytes(); - let mangled_name = mangled_name.as_bytes(); - - let (mangling_prefix, expect_suffix) = match call_conv { - Some(Abi::C) | - // None is the case for global variables - None => { - (b'_', false) - } - Some(Abi::Stdcall) => (b'_', true), - Some(Abi::Fastcall) => (b'@', true), - - // This is something we don't recognize, stay on the safe side - // by emitting the `#[link_name]` attribute - Some(_) => return false, - }; - - // Check that the mangled name is long enough to at least contain the - // canonical name plus the expected prefix. - if mangled_name.len() < canonical_name.len() + 1 { - return false; - } - - // Return if the mangled name does not start with the prefix expected - // for the given calling convention. 
- if mangled_name[0] != mangling_prefix { - return false; - } - - // Check that the mangled name contains the canonical name after the - // prefix - if &mangled_name[1..canonical_name.len() + 1] != canonical_name { - return false; - } - - // If the given calling convention also prescribes a suffix, check that - // it exists too - if expect_suffix { - let suffix = &mangled_name[canonical_name.len() + 1..]; - - // The shortest suffix is "@0" - if suffix.len() < 2 { - return false; - } - - // Check that the suffix starts with '@' and is all ASCII decimals - // after that. - if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) - { - return false; - } - } else if mangled_name.len() != canonical_name.len() + 1 { - // If we don't expect a prefix but there is one, we need the - // #[link_name] attribute - return false; - } - - true - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/struct_layout.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/struct_layout.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/struct_layout.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/codegen/struct_layout.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,438 +0,0 @@ -//! Helpers for code generation that need struct layout - -use super::helpers; - -use crate::ir::comp::CompInfo; -use crate::ir::context::BindgenContext; -use crate::ir::layout::Layout; -use crate::ir::ty::{Type, TypeKind}; -use proc_macro2::{self, Ident, Span}; -use std::cmp; - -const MAX_GUARANTEED_ALIGN: usize = 8; - -/// Trace the layout of struct. 
-#[derive(Debug)] -pub struct StructLayoutTracker<'a> { - name: &'a str, - ctx: &'a BindgenContext, - comp: &'a CompInfo, - is_packed: bool, - known_type_layout: Option, - is_rust_union: bool, - latest_offset: usize, - padding_count: usize, - latest_field_layout: Option, - max_field_align: usize, - last_field_was_bitfield: bool, -} - -/// Returns a size aligned to a given value. -pub fn align_to(size: usize, align: usize) -> usize { - if align == 0 { - return size; - } - - let rem = size % align; - if rem == 0 { - return size; - } - - size + align - rem -} - -/// Returns the lower power of two byte count that can hold at most n bits. -pub fn bytes_from_bits_pow2(mut n: usize) -> usize { - if n == 0 { - return 0; - } - - if n <= 8 { - return 1; - } - - if !n.is_power_of_two() { - n = n.next_power_of_two(); - } - - n / 8 -} - -#[test] -fn test_align_to() { - assert_eq!(align_to(1, 1), 1); - assert_eq!(align_to(1, 2), 2); - assert_eq!(align_to(1, 4), 4); - assert_eq!(align_to(5, 1), 5); - assert_eq!(align_to(17, 4), 20); -} - -#[test] -fn test_bytes_from_bits_pow2() { - assert_eq!(bytes_from_bits_pow2(0), 0); - for i in 1..9 { - assert_eq!(bytes_from_bits_pow2(i), 1); - } - for i in 9..17 { - assert_eq!(bytes_from_bits_pow2(i), 2); - } - for i in 17..33 { - assert_eq!(bytes_from_bits_pow2(i), 4); - } -} - -impl<'a> StructLayoutTracker<'a> { - pub fn new( - ctx: &'a BindgenContext, - comp: &'a CompInfo, - ty: &'a Type, - name: &'a str, - ) -> Self { - let known_type_layout = ty.layout(ctx); - let is_packed = comp.is_packed(ctx, known_type_layout.as_ref()); - let is_rust_union = comp.is_union() && - comp.can_be_rust_union(ctx, known_type_layout.as_ref()); - StructLayoutTracker { - name, - ctx, - comp, - is_packed, - known_type_layout, - is_rust_union, - latest_offset: 0, - padding_count: 0, - latest_field_layout: None, - max_field_align: 0, - last_field_was_bitfield: false, - } - } - - pub fn is_rust_union(&self) -> bool { - self.is_rust_union - } - - pub fn 
saw_vtable(&mut self) { - debug!("saw vtable for {}", self.name); - - let ptr_size = self.ctx.target_pointer_size(); - self.latest_offset += ptr_size; - self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size)); - self.max_field_align = ptr_size; - } - - pub fn saw_base(&mut self, base_ty: &Type) { - debug!("saw base for {}", self.name); - if let Some(layout) = base_ty.layout(self.ctx) { - self.align_to_latest_field(layout); - - self.latest_offset += self.padding_bytes(layout) + layout.size; - self.latest_field_layout = Some(layout); - self.max_field_align = cmp::max(self.max_field_align, layout.align); - } - } - - pub fn saw_bitfield_unit(&mut self, layout: Layout) { - debug!("saw bitfield unit for {}: {:?}", self.name, layout); - - self.align_to_latest_field(layout); - - self.latest_offset += layout.size; - - debug!( - "Offset: : {} -> {}", - self.latest_offset - layout.size, - self.latest_offset - ); - - self.latest_field_layout = Some(layout); - self.last_field_was_bitfield = true; - // NB: We intentionally don't update the max_field_align here, since our - // bitfields code doesn't necessarily guarantee it, so we need to - // actually generate the dummy alignment. - } - - /// Returns a padding field if necessary for a given new field _before_ - /// adding that field. - pub fn saw_field( - &mut self, - field_name: &str, - field_ty: &Type, - field_offset: Option, - ) -> Option { - let mut field_layout = field_ty.layout(self.ctx)?; - - if let TypeKind::Array(inner, len) = - *field_ty.canonical_type(self.ctx).kind() - { - // FIXME(emilio): As an _ultra_ hack, we correct the layout returned - // by arrays of structs that have a bigger alignment than what we - // can support. - // - // This means that the structs in the array are super-unsafe to - // access, since they won't be properly aligned, but there's not too - // much we can do about it. 
- if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) - { - if layout.align > MAX_GUARANTEED_ALIGN { - field_layout.size = - align_to(layout.size, layout.align) * len; - field_layout.align = MAX_GUARANTEED_ALIGN; - } - } - } - self.saw_field_with_layout(field_name, field_layout, field_offset) - } - - pub fn saw_field_with_layout( - &mut self, - field_name: &str, - field_layout: Layout, - field_offset: Option, - ) -> Option { - let will_merge_with_bitfield = self.align_to_latest_field(field_layout); - - let is_union = self.comp.is_union(); - let padding_bytes = match field_offset { - Some(offset) if offset / 8 > self.latest_offset => { - offset / 8 - self.latest_offset - } - _ => { - if will_merge_with_bitfield || - field_layout.align == 0 || - is_union - { - 0 - } else if !self.is_packed { - self.padding_bytes(field_layout) - } else if let Some(l) = self.known_type_layout { - self.padding_bytes(l) - } else { - 0 - } - } - }; - - self.latest_offset += padding_bytes; - - let padding_layout = if self.is_packed || is_union { - None - } else { - let force_padding = self.ctx.options().force_explicit_padding; - - // Otherwise the padding is useless. 
- let need_padding = force_padding || - padding_bytes >= field_layout.align || - field_layout.align > MAX_GUARANTEED_ALIGN; - - debug!( - "Offset: : {} -> {}", - self.latest_offset - padding_bytes, - self.latest_offset - ); - - debug!( - "align field {} to {}/{} with {} padding bytes {:?}", - field_name, - self.latest_offset, - field_offset.unwrap_or(0) / 8, - padding_bytes, - field_layout - ); - - let padding_align = if force_padding { - 1 - } else { - cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN) - }; - - if need_padding && padding_bytes != 0 { - Some(Layout::new(padding_bytes, padding_align)) - } else { - None - } - }; - - self.latest_offset += field_layout.size; - self.latest_field_layout = Some(field_layout); - self.max_field_align = - cmp::max(self.max_field_align, field_layout.align); - self.last_field_was_bitfield = false; - - debug!( - "Offset: {}: {} -> {}", - field_name, - self.latest_offset - field_layout.size, - self.latest_offset - ); - - padding_layout.map(|layout| self.padding_field(layout)) - } - - pub fn add_tail_padding( - &mut self, - comp_name: &str, - comp_layout: Layout, - ) -> Option { - // Only emit an padding field at the end of a struct if the - // user configures explicit padding. - if !self.ctx.options().force_explicit_padding { - return None; - } - - // Padding doesn't make sense for rust unions. - if self.is_rust_union { - return None; - } - - if self.latest_offset == comp_layout.size { - // This struct does not contain tail padding. 
- return None; - } - - trace!( - "need a tail padding field for {}: offset {} -> size {}", - comp_name, - self.latest_offset, - comp_layout.size - ); - let size = comp_layout.size - self.latest_offset; - Some(self.padding_field(Layout::new(size, 0))) - } - - pub fn pad_struct( - &mut self, - layout: Layout, - ) -> Option { - debug!( - "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}", - self, layout - ); - - if layout.size < self.latest_offset { - warn!( - "Calculated wrong layout for {}, too more {} bytes", - self.name, - self.latest_offset - layout.size - ); - return None; - } - - let padding_bytes = layout.size - self.latest_offset; - if padding_bytes == 0 { - return None; - } - - let repr_align = self.ctx.options().rust_features().repr_align; - - // We always pad to get to the correct size if the struct is one of - // those we can't align properly. - // - // Note that if the last field we saw was a bitfield, we may need to pad - // regardless, because bitfields don't respect alignment as strictly as - // other fields. - if padding_bytes >= layout.align || - (self.last_field_was_bitfield && - padding_bytes >= self.latest_field_layout.unwrap().align) || - (!repr_align && layout.align > MAX_GUARANTEED_ALIGN) - { - let layout = if self.is_packed { - Layout::new(padding_bytes, 1) - } else if self.last_field_was_bitfield || - layout.align > MAX_GUARANTEED_ALIGN - { - // We've already given up on alignment here. - Layout::for_size(self.ctx, padding_bytes) - } else { - Layout::new(padding_bytes, layout.align) - }; - - debug!("pad bytes to struct {}, {:?}", self.name, layout); - - Some(self.padding_field(layout)) - } else { - None - } - } - - pub fn requires_explicit_align(&self, layout: Layout) -> bool { - let repr_align = self.ctx.options().rust_features().repr_align; - - // Always force explicit repr(align) for stuff more than 16-byte aligned - // to work-around https://github.com/rust-lang/rust/issues/54341. 
- // - // Worst-case this just generates redundant alignment attributes. - if repr_align && self.max_field_align >= 16 { - return true; - } - - if self.max_field_align >= layout.align { - return false; - } - - // We can only generate up-to a 8-bytes of alignment unless we support - // repr(align). - repr_align || layout.align <= MAX_GUARANTEED_ALIGN - } - - fn padding_bytes(&self, layout: Layout) -> usize { - align_to(self.latest_offset, layout.align) - self.latest_offset - } - - fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream { - let ty = helpers::blob(self.ctx, layout); - let padding_count = self.padding_count; - - self.padding_count += 1; - - let padding_field_name = Ident::new( - &format!("__bindgen_padding_{}", padding_count), - Span::call_site(), - ); - - self.max_field_align = cmp::max(self.max_field_align, layout.align); - - quote! { - pub #padding_field_name : #ty , - } - } - - /// Returns whether the new field is known to merge with a bitfield. - /// - /// This is just to avoid doing the same check also in pad_field. - fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool { - if self.is_packed { - // Skip to align fields when packed. - return false; - } - - let layout = match self.latest_field_layout { - Some(l) => l, - None => return false, - }; - - // If it was, we may or may not need to align, depending on what the - // current field alignment and the bitfield size and alignment are. - debug!( - "align_to_bitfield? {}: {:?} {:?}", - self.last_field_was_bitfield, layout, new_field_layout - ); - - // Avoid divide-by-zero errors if align is 0. - let align = cmp::max(1, layout.align); - - if self.last_field_was_bitfield && - new_field_layout.align <= layout.size % align && - new_field_layout.size <= layout.size % align - { - // The new field will be coalesced into some of the remaining bits. - // - // FIXME(emilio): I think this may not catch everything? 
- debug!("Will merge with bitfield"); - return true; - } - - // Else, just align the obvious way. - self.latest_offset += self.padding_bytes(layout); - false - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/deps.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/deps.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/deps.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/deps.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -/// Generating build depfiles from parsed bindings. -use std::{collections::BTreeSet, path::PathBuf}; - -#[derive(Debug)] -pub(crate) struct DepfileSpec { - pub output_module: String, - pub depfile_path: PathBuf, -} - -impl DepfileSpec { - pub fn write(&self, deps: &BTreeSet) -> std::io::Result<()> { - let mut buf = format!("{}:", self.output_module); - - for file in deps { - buf = format!("{} {}", buf, file); - } - - std::fs::write(&self.depfile_path, &buf) - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/extra_assertions.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/extra_assertions.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/extra_assertions.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/extra_assertions.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -//! Macros for defining extra assertions that should only be checked in testing -//! and/or CI when the `testing_only_extra_assertions` feature is enabled. - -/// Simple macro that forwards to assert! when using -/// testing_only_extra_assertions. -#[macro_export] -macro_rules! 
extra_assert { - ( $cond:expr ) => { - if cfg!(feature = "testing_only_extra_assertions") { - assert!($cond); - } - }; - ( $cond:expr , $( $arg:tt )+ ) => { - if cfg!(feature = "testing_only_extra_assertions") { - assert!($cond, $( $arg )* ) - } - }; -} - -/// Simple macro that forwards to assert_eq! when using -/// testing_only_extra_assertions. -#[macro_export] -macro_rules! extra_assert_eq { - ( $lhs:expr , $rhs:expr ) => { - if cfg!(feature = "testing_only_extra_assertions") { - assert_eq!($lhs, $rhs); - } - }; - ( $lhs:expr , $rhs:expr , $( $arg:tt )+ ) => { - if cfg!(feature = "testing_only_extra_assertions") { - assert!($lhs, $rhs, $( $arg )* ); - } - }; -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/features.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/features.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/features.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/features.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,302 +0,0 @@ -//! Contains code for selecting features - -#![deny(missing_docs)] -#![deny(unused_extern_crates)] - -use std::io; -use std::str::FromStr; - -/// Define RustTarget struct definition, Default impl, and conversions -/// between RustTarget and String. -macro_rules! rust_target_def { - ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => { - /// Represents the version of the Rust language to target. - /// - /// To support a beta release, use the corresponding stable release. - /// - /// This enum will have more variants added as necessary. 
- #[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Hash)] - #[allow(non_camel_case_types)] - pub enum RustTarget { - $( - $( - #[$attr] - )* - $release, - )* - } - - impl Default for RustTarget { - /// Gives the latest stable Rust version - fn default() -> RustTarget { - LATEST_STABLE_RUST - } - } - - impl FromStr for RustTarget { - type Err = io::Error; - - /// Create a `RustTarget` from a string. - /// - /// * The stable/beta versions of Rust are of the form "1.0", - /// "1.19", etc. - /// * The nightly version should be specified with "nightly". - fn from_str(s: &str) -> Result { - match s.as_ref() { - $( - stringify!($value) => Ok(RustTarget::$release), - )* - _ => Err( - io::Error::new( - io::ErrorKind::InvalidInput, - concat!( - "Got an invalid rust target. Accepted values ", - "are of the form ", - "\"1.0\" or \"nightly\"."))), - } - } - } - - impl From for String { - fn from(target: RustTarget) -> Self { - match target { - $( - RustTarget::$release => stringify!($value), - )* - }.into() - } - } - } -} - -/// Defines an array slice with all RustTarget values -macro_rules! rust_target_values_def { - ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => { - /// Strings of allowed `RustTarget` values - pub static RUST_TARGET_STRINGS: &'static [&str] = &[ - $( - stringify!($value), - )* - ]; - } -} - -/// Defines macro which takes a macro -macro_rules! 
rust_target_base { - ( $x_macro:ident ) => { - $x_macro!( - /// Rust stable 1.0 - => Stable_1_0 => 1.0; - /// Rust stable 1.17 - /// * Static lifetime elision ([RFC 1623](https://github.com/rust-lang/rfcs/blob/master/text/1623-static.md)) - => Stable_1_17 => 1.17; - /// Rust stable 1.19 - /// * Untagged unions ([RFC 1444](https://github.com/rust-lang/rfcs/blob/master/text/1444-union.md)) - => Stable_1_19 => 1.19; - /// Rust stable 1.20 - /// * Associated constants ([PR](https://github.com/rust-lang/rust/pull/42809)) - => Stable_1_20 => 1.20; - /// Rust stable 1.21 - /// * Builtin impls for `Clone` ([PR](https://github.com/rust-lang/rust/pull/43690)) - => Stable_1_21 => 1.21; - /// Rust stable 1.25 - /// * `repr(align)` ([PR](https://github.com/rust-lang/rust/pull/47006)) - => Stable_1_25 => 1.25; - /// Rust stable 1.26 - /// * [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html) - => Stable_1_26 => 1.26; - /// Rust stable 1.27 - /// * `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925)) - => Stable_1_27 => 1.27; - /// Rust stable 1.28 - /// * `repr(transparent)` ([PR](https://github.com/rust-lang/rust/pull/51562)) - => Stable_1_28 => 1.28; - /// Rust stable 1.30 - /// * `const fn` support for limited cases ([PR](https://github.com/rust-lang/rust/pull/54835/) - /// * [c_void available in core](https://doc.rust-lang.org/core/ffi/enum.c_void.html) - => Stable_1_30 => 1.30; - /// Rust stable 1.33 - /// * repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049)) - => Stable_1_33 => 1.33; - /// Rust stable 1.36 - /// * `MaybeUninit` instead of `mem::uninitialized()` ([PR](https://github.com/rust-lang/rust/pull/60445)) - => Stable_1_36 => 1.36; - /// Rust stable 1.40 - /// * `non_exhaustive` enums/structs ([Tracking issue](https://github.com/rust-lang/rust/issues/44109)) - => Stable_1_40 => 1.40; - /// Rust stable 1.47 - /// * `larger_arrays` ([Tracking issue](https://github.com/rust-lang/rust/pull/74060)) - 
=> Stable_1_47 => 1.47; - /// Nightly rust - /// * `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202)) - => Nightly => nightly; - ); - } -} - -rust_target_base!(rust_target_def); -rust_target_base!(rust_target_values_def); - -/// Latest stable release of Rust -pub const LATEST_STABLE_RUST: RustTarget = RustTarget::Stable_1_47; - -/// Create RustFeatures struct definition, new(), and a getter for each field -macro_rules! rust_feature_def { - ( - $( $rust_target:ident { - $( $( #[$attr:meta] )* => $feature:ident; )* - } )* - ) => { - /// Features supported by a rust target - #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] - #[allow(missing_docs)] // Documentation should go into the relevant variants. - pub(crate) struct RustFeatures { - $( $( - $( - #[$attr] - )* - pub $feature: bool, - )* )* - } - - impl RustFeatures { - /// Gives a RustFeatures struct with all features disabled - fn new() -> Self { - RustFeatures { - $( $( - $feature: false, - )* )* - } - } - } - - impl From for RustFeatures { - fn from(rust_target: RustTarget) -> Self { - let mut features = RustFeatures::new(); - - $( - if rust_target >= RustTarget::$rust_target { - $( - features.$feature = true; - )* - } - )* - - features - } - } - } -} - -// NOTE(emilio): When adding or removing features here, make sure to update the -// documentation for the relevant variant in the rust_target_base macro -// definition. 
-rust_feature_def!( - Stable_1_17 { - => static_lifetime_elision; - } - Stable_1_19 { - => untagged_union; - } - Stable_1_20 { - => associated_const; - } - Stable_1_21 { - => builtin_clone_impls; - } - Stable_1_25 { - => repr_align; - } - Stable_1_26 { - => i128_and_u128; - } - Stable_1_27 { - => must_use_function; - } - Stable_1_28 { - => repr_transparent; - } - Stable_1_30 { - => min_const_fn; - => core_ffi_c_void; - } - Stable_1_33 { - => repr_packed_n; - } - Stable_1_36 { - => maybe_uninit; - } - Stable_1_40 { - => non_exhaustive; - } - Stable_1_47 { - => larger_arrays; - } - Nightly { - => thiscall_abi; - } -); - -impl Default for RustFeatures { - fn default() -> Self { - let default_rust_target: RustTarget = Default::default(); - Self::from(default_rust_target) - } -} - -#[cfg(test)] -mod test { - #![allow(unused_imports)] - use super::*; - - #[test] - fn target_features() { - let f_1_0 = RustFeatures::from(RustTarget::Stable_1_0); - assert!( - !f_1_0.static_lifetime_elision && - !f_1_0.core_ffi_c_void && - !f_1_0.untagged_union && - !f_1_0.associated_const && - !f_1_0.builtin_clone_impls && - !f_1_0.repr_align && - !f_1_0.thiscall_abi - ); - let f_1_21 = RustFeatures::from(RustTarget::Stable_1_21); - assert!( - f_1_21.static_lifetime_elision && - !f_1_21.core_ffi_c_void && - f_1_21.untagged_union && - f_1_21.associated_const && - f_1_21.builtin_clone_impls && - !f_1_21.repr_align && - !f_1_21.thiscall_abi - ); - let f_nightly = RustFeatures::from(RustTarget::Nightly); - assert!( - f_nightly.static_lifetime_elision && - f_nightly.core_ffi_c_void && - f_nightly.untagged_union && - f_nightly.associated_const && - f_nightly.builtin_clone_impls && - f_nightly.maybe_uninit && - f_nightly.repr_align && - f_nightly.thiscall_abi - ); - } - - fn test_target(target_str: &str, target: RustTarget) { - let target_string: String = target.into(); - assert_eq!(target_str, target_string); - assert_eq!(target, RustTarget::from_str(target_str).unwrap()); - } - - #[test] - fn 
str_to_target() { - test_target("1.0", RustTarget::Stable_1_0); - test_target("1.17", RustTarget::Stable_1_17); - test_target("1.19", RustTarget::Stable_1_19); - test_target("1.21", RustTarget::Stable_1_21); - test_target("1.25", RustTarget::Stable_1_25); - test_target("nightly", RustTarget::Nightly); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/derive.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/derive.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/derive.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/derive.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,732 +0,0 @@ -//! Determining which types for which we cannot emit `#[derive(Trait)]`. - -use std::fmt; - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::analysis::has_vtable::HasVtable; -use crate::ir::comp::CompKind; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::derive::CanDerive; -use crate::ir::function::FunctionSig; -use crate::ir::item::{IsOpaque, Item}; -use crate::ir::layout::Layout; -use crate::ir::template::TemplateParameters; -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; -use crate::ir::ty::{Type, TypeKind}; -use crate::{Entry, HashMap, HashSet}; - -/// Which trait to consider when doing the `CannotDerive` analysis. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum DeriveTrait { - /// The `Copy` trait. - Copy, - /// The `Debug` trait. - Debug, - /// The `Default` trait. - Default, - /// The `Hash` trait. - Hash, - /// The `PartialEq` and `PartialOrd` traits. - PartialEqOrPartialOrd, -} - -/// An analysis that finds for each IR item whether a trait cannot be derived. 
-/// -/// We use the monotone constraint function `cannot_derive`, defined as follows -/// for type T: -/// -/// * If T is Opaque and the layout of the type is known, get this layout as an -/// opaquetype and check whether it can derive using trivial checks. -/// -/// * If T is Array, a trait cannot be derived if the array is incomplete, -/// if the length of the array is larger than the limit (unless the trait -/// allows it), or the trait cannot be derived for the type of data the array -/// contains. -/// -/// * If T is Vector, a trait cannot be derived if the trait cannot be derived -/// for the type of data the vector contains. -/// -/// * If T is a type alias, a templated alias or an indirection to another type, -/// the trait cannot be derived if the trait cannot be derived for type T -/// refers to. -/// -/// * If T is a compound type, the trait cannot be derived if the trait cannot -/// be derived for any of its base members or fields. -/// -/// * If T is an instantiation of an abstract template definition, the trait -/// cannot be derived if any of the template arguments or template definition -/// cannot derive the trait. -/// -/// * For all other (simple) types, compiler and standard library limitations -/// dictate whether the trait is implemented. -#[derive(Debug, Clone)] -pub struct CannotDerive<'ctx> { - ctx: &'ctx BindgenContext, - - derive_trait: DeriveTrait, - - // The incremental result of this analysis's computation. - // Contains information whether particular item can derive `derive_trait` - can_derive: HashMap, - - // Dependencies saying that if a key ItemId has been inserted into the - // `cannot_derive_partialeq_or_partialord` set, then each of the ids - // in Vec need to be considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // can derive `derive_trait`. 
- dependencies: HashMap>, -} - -type EdgePredicate = fn(EdgeKind) -> bool; - -fn consider_edge_default(kind: EdgeKind) -> bool { - match kind { - // These are the only edges that can affect whether a type can derive - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method | - EdgeKind::Generic => false, - } -} - -impl<'ctx> CannotDerive<'ctx> { - fn insert>( - &mut self, - id: Id, - can_derive: CanDerive, - ) -> ConstrainResult { - let id = id.into(); - trace!( - "inserting {:?} can_derive<{}>={:?}", - id, - self.derive_trait, - can_derive - ); - - if let CanDerive::Yes = can_derive { - return ConstrainResult::Same; - } - - match self.can_derive.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < can_derive { - entry.insert(can_derive); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(can_derive); - ConstrainResult::Changed - } - } - } - - fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive { - if !self.ctx.allowlisted_items().contains(&item.id()) { - let can_derive = self - .ctx - .blocklisted_type_implements_trait(item, self.derive_trait); - match can_derive { - CanDerive::Yes => trace!( - " blocklisted type explicitly implements {}", - self.derive_trait - ), - CanDerive::Manually => trace!( - " blocklisted type requires manual implementation of {}", - self.derive_trait - ), - CanDerive::No => trace!( - " cannot derive {} for blocklisted type", - self.derive_trait - ), - } - return can_derive; - } - - if self.derive_trait.not_by_name(self.ctx, item) { - trace!( - " cannot derive {} for explicitly excluded type", - self.derive_trait - ); - return 
CanDerive::No; - } - - trace!("ty: {:?}", ty); - if item.is_opaque(self.ctx, &()) { - if !self.derive_trait.can_derive_union() && - ty.is_union() && - self.ctx.options().rust_features().untagged_union - { - trace!( - " cannot derive {} for Rust unions", - self.derive_trait - ); - return CanDerive::No; - } - - let layout_can_derive = - ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { - l.opaque().array_size_within_derive_limit(self.ctx) - }); - - match layout_can_derive { - CanDerive::Yes => { - trace!( - " we can trivially derive {} for the layout", - self.derive_trait - ); - } - _ => { - trace!( - " we cannot derive {} for the layout", - self.derive_trait - ); - } - }; - return layout_can_derive; - } - - match *ty.kind() { - // Handle the simple cases. These can derive traits without further - // information. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Complex(..) | - TypeKind::Float(..) | - TypeKind::Enum(..) | - TypeKind::TypeParam | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::Reference(..) | - TypeKind::ObjCInterface(..) 
| - TypeKind::ObjCId | - TypeKind::ObjCSel => { - return self.derive_trait.can_derive_simple(ty.kind()); - } - TypeKind::Pointer(inner) => { - let inner_type = - self.ctx.resolve_type(inner).canonical_type(self.ctx); - if let TypeKind::Function(ref sig) = *inner_type.kind() { - self.derive_trait.can_derive_fnptr(sig) - } else { - self.derive_trait.can_derive_pointer() - } - } - TypeKind::Function(ref sig) => { - self.derive_trait.can_derive_fnptr(sig) - } - - // Complex cases need more information - TypeKind::Array(t, len) => { - let inner_type = - self.can_derive.get(&t.into()).cloned().unwrap_or_default(); - if inner_type != CanDerive::Yes { - trace!( - " arrays of T for which we cannot derive {} \ - also cannot derive {}", - self.derive_trait, - self.derive_trait - ); - return CanDerive::No; - } - - if len == 0 && !self.derive_trait.can_derive_incomplete_array() - { - trace!( - " cannot derive {} for incomplete arrays", - self.derive_trait - ); - return CanDerive::No; - } - - if self.derive_trait.can_derive_large_array(self.ctx) { - trace!(" array can derive {}", self.derive_trait); - return CanDerive::Yes; - } - - if len > RUST_DERIVE_IN_ARRAY_LIMIT { - trace!( - " array is too large to derive {}, but it may be implemented", self.derive_trait - ); - return CanDerive::Manually; - } - trace!( - " array is small enough to derive {}", - self.derive_trait - ); - CanDerive::Yes - } - TypeKind::Vector(t, len) => { - let inner_type = - self.can_derive.get(&t.into()).cloned().unwrap_or_default(); - if inner_type != CanDerive::Yes { - trace!( - " vectors of T for which we cannot derive {} \ - also cannot derive {}", - self.derive_trait, - self.derive_trait - ); - return CanDerive::No; - } - assert_ne!(len, 0, "vectors cannot have zero length"); - self.derive_trait.can_derive_vector() - } - - TypeKind::Comp(ref info) => { - assert!( - !info.has_non_type_template_params(), - "The early ty.is_opaque check should have handled this case" - ); - - if 
!self.derive_trait.can_derive_compound_forward_decl() && - info.is_forward_declaration() - { - trace!( - " cannot derive {} for forward decls", - self.derive_trait - ); - return CanDerive::No; - } - - // NOTE: Take into account that while unions in C and C++ are copied by - // default, the may have an explicit destructor in C++, so we can't - // defer this check just for the union case. - if !self.derive_trait.can_derive_compound_with_destructor() && - self.ctx.lookup_has_destructor( - item.id().expect_type_id(self.ctx), - ) - { - trace!( - " comp has destructor which cannot derive {}", - self.derive_trait - ); - return CanDerive::No; - } - - if info.kind() == CompKind::Union { - if self.derive_trait.can_derive_union() { - if self.ctx.options().rust_features().untagged_union && - // https://github.com/rust-lang/rust/issues/36640 - (!info.self_template_params(self.ctx).is_empty() || - !item.all_template_params(self.ctx).is_empty()) - { - trace!( - " cannot derive {} for Rust union because issue 36640", self.derive_trait - ); - return CanDerive::No; - } - // fall through to be same as non-union handling - } else { - if self.ctx.options().rust_features().untagged_union { - trace!( - " cannot derive {} for Rust unions", - self.derive_trait - ); - return CanDerive::No; - } - - let layout_can_derive = - ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { - l.opaque() - .array_size_within_derive_limit(self.ctx) - }); - match layout_can_derive { - CanDerive::Yes => { - trace!( - " union layout can trivially derive {}", - self.derive_trait - ); - } - _ => { - trace!( - " union layout cannot derive {}", - self.derive_trait - ); - } - }; - return layout_can_derive; - } - } - - if !self.derive_trait.can_derive_compound_with_vtable() && - item.has_vtable(self.ctx) - { - trace!( - " cannot derive {} for comp with vtable", - self.derive_trait - ); - return CanDerive::No; - } - - // Bitfield units are always represented as arrays of u8, but - // they're not traced as arrays, so we 
need to check here - // instead. - if !self.derive_trait.can_derive_large_array(self.ctx) && - info.has_too_large_bitfield_unit() && - !item.is_opaque(self.ctx, &()) - { - trace!( - " cannot derive {} for comp with too large bitfield unit", - self.derive_trait - ); - return CanDerive::No; - } - - let pred = self.derive_trait.consider_edge_comp(); - self.constrain_join(item, pred) - } - - TypeKind::ResolvedTypeRef(..) | - TypeKind::TemplateAlias(..) | - TypeKind::Alias(..) | - TypeKind::BlockPointer(..) => { - let pred = self.derive_trait.consider_edge_typeref(); - self.constrain_join(item, pred) - } - - TypeKind::TemplateInstantiation(..) => { - let pred = self.derive_trait.consider_edge_tmpl_inst(); - self.constrain_join(item, pred) - } - - TypeKind::Opaque => unreachable!( - "The early ty.is_opaque check should have handled this case" - ), - } - } - - fn constrain_join( - &mut self, - item: &Item, - consider_edge: EdgePredicate, - ) -> CanDerive { - let mut candidate = None; - - item.trace( - self.ctx, - &mut |sub_id, edge_kind| { - // Ignore ourselves, since union with ourself is a - // no-op. Ignore edges that aren't relevant to the - // analysis. 
- if sub_id == item.id() || !consider_edge(edge_kind) { - return; - } - - let can_derive = self.can_derive - .get(&sub_id) - .cloned() - .unwrap_or_default(); - - match can_derive { - CanDerive::Yes => trace!(" member {:?} can derive {}", sub_id, self.derive_trait), - CanDerive::Manually => trace!(" member {:?} cannot derive {}, but it may be implemented", sub_id, self.derive_trait), - CanDerive::No => trace!(" member {:?} cannot derive {}", sub_id, self.derive_trait), - } - - *candidate.get_or_insert(CanDerive::Yes) |= can_derive; - }, - &(), - ); - - if candidate.is_none() { - trace!( - " can derive {} because there are no members", - self.derive_trait - ); - } - candidate.unwrap_or_default() - } -} - -impl DeriveTrait { - fn not_by_name(&self, ctx: &BindgenContext, item: &Item) -> bool { - match self { - DeriveTrait::Copy => ctx.no_copy_by_name(item), - DeriveTrait::Debug => ctx.no_debug_by_name(item), - DeriveTrait::Default => ctx.no_default_by_name(item), - DeriveTrait::Hash => ctx.no_hash_by_name(item), - DeriveTrait::PartialEqOrPartialOrd => { - ctx.no_partialeq_by_name(item) - } - } - } - - fn consider_edge_comp(&self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field), - } - } - - fn consider_edge_typeref(&self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| kind == EdgeKind::TypeReference, - } - } - - fn consider_edge_tmpl_inst(&self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| match kind { - EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration => { - true - } - _ => false, - }, - } - } - - fn can_derive_large_array(&self, ctx: &BindgenContext) -> bool { - if ctx.options().rust_features().larger_arrays { - !matches!(self, DeriveTrait::Default) - } else { - matches!(self, DeriveTrait::Copy) - } - } - - fn 
can_derive_union(&self) -> bool { - matches!(self, DeriveTrait::Copy) - } - - fn can_derive_compound_with_destructor(&self) -> bool { - !matches!(self, DeriveTrait::Copy) - } - - fn can_derive_compound_with_vtable(&self) -> bool { - !matches!(self, DeriveTrait::Default) - } - - fn can_derive_compound_forward_decl(&self) -> bool { - matches!(self, DeriveTrait::Copy | DeriveTrait::Debug) - } - - fn can_derive_incomplete_array(&self) -> bool { - !matches!( - self, - DeriveTrait::Copy | - DeriveTrait::Hash | - DeriveTrait::PartialEqOrPartialOrd - ) - } - - fn can_derive_fnptr(&self, f: &FunctionSig) -> CanDerive { - match (self, f.function_pointers_can_derive()) { - (DeriveTrait::Copy, _) | (DeriveTrait::Default, _) | (_, true) => { - trace!(" function pointer can derive {}", self); - CanDerive::Yes - } - (DeriveTrait::Debug, false) => { - trace!(" function pointer cannot derive {}, but it may be implemented", self); - CanDerive::Manually - } - (_, false) => { - trace!(" function pointer cannot derive {}", self); - CanDerive::No - } - } - } - - fn can_derive_vector(&self) -> CanDerive { - match self { - DeriveTrait::PartialEqOrPartialOrd => { - // FIXME: vectors always can derive PartialEq, but they should - // not derive PartialOrd: - // https://github.com/rust-lang-nursery/packed_simd/issues/48 - trace!(" vectors cannot derive PartialOrd"); - CanDerive::No - } - _ => { - trace!(" vector can derive {}", self); - CanDerive::Yes - } - } - } - - fn can_derive_pointer(&self) -> CanDerive { - match self { - DeriveTrait::Default => { - trace!(" pointer cannot derive Default"); - CanDerive::No - } - _ => { - trace!(" pointer can derive {}", self); - CanDerive::Yes - } - } - } - - fn can_derive_simple(&self, kind: &TypeKind) -> CanDerive { - match (self, kind) { - // === Default === - (DeriveTrait::Default, TypeKind::Void) | - (DeriveTrait::Default, TypeKind::NullPtr) | - (DeriveTrait::Default, TypeKind::Enum(..)) | - (DeriveTrait::Default, TypeKind::Reference(..)) | - 
(DeriveTrait::Default, TypeKind::TypeParam) | - (DeriveTrait::Default, TypeKind::ObjCInterface(..)) | - (DeriveTrait::Default, TypeKind::ObjCId) | - (DeriveTrait::Default, TypeKind::ObjCSel) => { - trace!(" types that always cannot derive Default"); - CanDerive::No - } - (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => { - unreachable!( - "Type with unresolved type ref can't reach derive default" - ) - } - // === Hash === - (DeriveTrait::Hash, TypeKind::Float(..)) | - (DeriveTrait::Hash, TypeKind::Complex(..)) => { - trace!(" float cannot derive Hash"); - CanDerive::No - } - // === others === - _ => { - trace!(" simple type that can always derive {}", self); - CanDerive::Yes - } - } - } -} - -impl fmt::Display for DeriveTrait { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match self { - DeriveTrait::Copy => "Copy", - DeriveTrait::Debug => "Debug", - DeriveTrait::Default => "Default", - DeriveTrait::Hash => "Hash", - DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd", - }; - s.fmt(f) - } -} - -impl<'ctx> MonotoneFramework for CannotDerive<'ctx> { - type Node = ItemId; - type Extra = (&'ctx BindgenContext, DeriveTrait); - type Output = HashMap; - - fn new( - (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait), - ) -> CannotDerive<'ctx> { - let can_derive = HashMap::default(); - let dependencies = generate_dependencies(ctx, consider_edge_default); - - CannotDerive { - ctx, - derive_trait, - can_derive, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - // The transitive closure of all allowlisted items, including explicitly - // blocklisted items. 
- self.ctx - .allowlisted_items() - .iter() - .cloned() - .flat_map(|i| { - let mut reachable = vec![i]; - i.trace( - self.ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {:?}", id); - - if let Some(CanDerive::No) = self.can_derive.get(&id).cloned() { - trace!(" already know it cannot derive {}", self.derive_trait); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let can_derive = match item.as_type() { - Some(ty) => { - let mut can_derive = self.constrain_type(item, ty); - if let CanDerive::Yes = can_derive { - let is_reached_limit = - |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT; - if !self.derive_trait.can_derive_large_array(self.ctx) && - ty.layout(self.ctx).map_or(false, is_reached_limit) - { - // We have to be conservative: the struct *could* have enough - // padding that we emit an array that is longer than - // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations - // into the IR and computed them before this analysis, then we could - // be precise rather than conservative here. - can_derive = CanDerive::Manually; - } - } - can_derive - } - None => self.constrain_join(item, consider_edge_default), - }; - - self.insert(id, can_derive) - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: CannotDerive<'ctx>) -> Self { - extra_assert!(analysis - .can_derive - .values() - .all(|v| *v != CanDerive::Yes)); - - analysis.can_derive - } -} - -/// Convert a `HashMap` into a `HashSet`. -/// -/// Elements that are not `CanDerive::Yes` are kept in the set, so that it -/// represents all items that cannot derive. 
-pub fn as_cannot_derive_set( - can_derive: HashMap, -) -> HashSet { - can_derive - .into_iter() - .filter_map(|(k, v)| if v != CanDerive::Yes { Some(k) } else { None }) - .collect() -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_destructor.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_destructor.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_destructor.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_destructor.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -//! Determining which types have destructors - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::{CompKind, Field, FieldMethods}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has a destructor or not -/// -/// We use the monotone function `has destructor`, defined as follows: -/// -/// * If T is a type alias, a templated alias, or an indirection to another type, -/// T has a destructor if the type T refers to has a destructor. -/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it, -/// or if it's a struct, T has a destructor if any of its base members has a destructor, -/// or if any of its fields have a destructor. -/// * If T is an instantiation of an abstract template definition, T has -/// a destructor if its template definition has a destructor, -/// or if any of the template arguments has a destructor. -/// * If T is the type of a field, that field has a destructor if it's not a bitfield, -/// and if T has a destructor. -#[derive(Debug, Clone)] -pub struct HasDestructorAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. 
Everything in this - // set definitely has a destructor. - have_destructor: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `have_destructor` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has a destructor or not. - dependencies: HashMap>, -} - -impl<'ctx> HasDestructorAnalysis<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type has a - // destructor or not. - matches!( - kind, - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration - ) - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - let was_not_already_in_set = self.have_destructor.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {:?} twice because if it was \ - already in the set, `constrain` should have exited early.", - id - ); - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> Self { - let have_destructor = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasDestructorAnalysis { - ctx, - have_destructor, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().cloned().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - if self.have_destructor.contains(&id) { - // We've already computed that this type has a destructor and that can't - // change. 
- return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - None => return ConstrainResult::Same, - Some(ty) => ty, - }; - - match *ty.kind() { - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::ResolvedTypeRef(t) => { - if self.have_destructor.contains(&t.into()) { - self.insert(id) - } else { - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - if info.has_own_destructor() { - return self.insert(id); - } - - match info.kind() { - CompKind::Union => ConstrainResult::Same, - CompKind::Struct => { - let base_or_field_destructor = - info.base_members().iter().any(|base| { - self.have_destructor.contains(&base.ty.into()) - }) || info.fields().iter().any( - |field| match *field { - Field::DataMember(ref data) => self - .have_destructor - .contains(&data.ty().into()), - Field::Bitfields(_) => false, - }, - ); - if base_or_field_destructor { - self.insert(id) - } else { - ConstrainResult::Same - } - } - } - } - - TypeKind::TemplateInstantiation(ref inst) => { - let definition_or_arg_destructor = self - .have_destructor - .contains(&inst.template_definition().into()) || - inst.template_arguments().iter().any(|arg| { - self.have_destructor.contains(&arg.into()) - }); - if definition_or_arg_destructor { - self.insert(id) - } else { - ConstrainResult::Same - } - } - - _ => ConstrainResult::Same, - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self { - analysis.have_destructor - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_float.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_float.rs --- 
clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_float.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_float.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,252 +0,0 @@ -//! Determining which types has float. - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::Field; -use crate::ir::comp::FieldMethods; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has float or not. -/// -/// We use the monotone constraint function `has_float`, -/// defined as follows: -/// -/// * If T is float or complex float, T trivially has. -/// * If T is a type alias, a templated alias or an indirection to another type, -/// it has float if the type T refers to has. -/// * If T is a compound type, it has float if any of base memter or field -/// has. -/// * If T is an instantiation of an abstract template definition, T has -/// float if any of the template arguments or template definition -/// has. -#[derive(Debug, Clone)] -pub struct HasFloat<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set has float. - has_float: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `has_float` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has float or not. 
- dependencies: HashMap>, -} - -impl<'ctx> HasFloat<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method => false, - EdgeKind::Generic => false, - } - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - trace!("inserting {:?} into the has_float set", id); - - let was_not_already_in_set = self.has_float.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {:?} twice because if it was \ - already in the set, `constrain` should have exited early.", - id - ); - - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasFloat<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> { - let has_float = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasFloat { - ctx, - has_float, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().cloned().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {:?}", id); - - if self.has_float.contains(&id) { - trace!(" already know it do not have float"); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - Some(ty) => ty, - None => { - trace!(" not a type; ignoring"); - return ConstrainResult::Same; - } - }; - - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) 
| - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Pointer(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel => { - trace!(" simple type that do not have float"); - ConstrainResult::Same - } - - TypeKind::Float(..) | TypeKind::Complex(..) => { - trace!(" float type has float"); - self.insert(id) - } - - TypeKind::Array(t, _) => { - if self.has_float.contains(&t.into()) { - trace!( - " Array with type T that has float also has float" - ); - return self.insert(id); - } - trace!(" Array with type T that do not have float also do not have float"); - ConstrainResult::Same - } - TypeKind::Vector(t, _) => { - if self.has_float.contains(&t.into()) { - trace!( - " Vector with type T that has float also has float" - ); - return self.insert(id); - } - trace!(" Vector with type T that do not have float also do not have float"); - ConstrainResult::Same - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - if self.has_float.contains(&t.into()) { - trace!( - " aliases and type refs to T which have float \ - also have float" - ); - self.insert(id) - } else { - trace!(" aliases and type refs to T which do not have float \ - also do not have floaarrayt"); - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - let bases_have = info - .base_members() - .iter() - .any(|base| self.has_float.contains(&base.ty.into())); - if bases_have { - trace!(" bases have float, so we also have"); - return self.insert(id); - } - let fields_have = info.fields().iter().any(|f| match *f { - Field::DataMember(ref data) => { - self.has_float.contains(&data.ty().into()) - } - Field::Bitfields(ref bfu) => bfu - .bitfields() - .iter() - .any(|b| self.has_float.contains(&b.ty().into())), - }); - if fields_have { - trace!(" fields have float, so we also have"); - return self.insert(id); - } - - trace!(" comp doesn't have float"); - 
ConstrainResult::Same - } - - TypeKind::TemplateInstantiation(ref template) => { - let args_have = template - .template_arguments() - .iter() - .any(|arg| self.has_float.contains(&arg.into())); - if args_have { - trace!( - " template args have float, so \ - insantiation also has float" - ); - return self.insert(id); - } - - let def_has = self - .has_float - .contains(&template.template_definition().into()); - if def_has { - trace!( - " template definition has float, so \ - insantiation also has" - ); - return self.insert(id); - } - - trace!(" template instantiation do not have float"); - ConstrainResult::Same - } - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasFloat<'ctx>) -> Self { - analysis.has_float - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_type_param_in_array.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_type_param_in_array.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_type_param_in_array.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_type_param_in_array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,252 +0,0 @@ -//! Determining which types has typed parameters in array. - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::Field; -use crate::ir::comp::FieldMethods; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has array or not. 
-/// -/// We use the monotone constraint function `has_type_parameter_in_array`, -/// defined as follows: -/// -/// * If T is Array type with type parameter, T trivially has. -/// * If T is a type alias, a templated alias or an indirection to another type, -/// it has type parameter in array if the type T refers to has. -/// * If T is a compound type, it has array if any of base memter or field -/// has type paramter in array. -/// * If T is an instantiation of an abstract template definition, T has -/// type parameter in array if any of the template arguments or template definition -/// has. -#[derive(Debug, Clone)] -pub struct HasTypeParameterInArray<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set has array. - has_type_parameter_in_array: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `has_type_parameter_in_array` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has array or not. - dependencies: HashMap>, -} - -impl<'ctx> HasTypeParameterInArray<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - // These are the only edges that can affect whether a type has type parameter - // in array or not. 
- EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method => false, - EdgeKind::Generic => false, - } - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - trace!( - "inserting {:?} into the has_type_parameter_in_array set", - id - ); - - let was_not_already_in_set = - self.has_type_parameter_in_array.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {:?} twice because if it was \ - already in the set, `constrain` should have exited early.", - id - ); - - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> { - let has_type_parameter_in_array = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasTypeParameterInArray { - ctx, - has_type_parameter_in_array, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().cloned().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {:?}", id); - - if self.has_type_parameter_in_array.contains(&id) { - trace!(" already know it do not have array"); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - Some(ty) => ty, - None => { - trace!(" not a type; ignoring"); - return ConstrainResult::Same; - } - }; - - match *ty.kind() { - // Handle the simple cases. These cannot have array in type parameter - // without further information. 
- TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Vector(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Pointer(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel => { - trace!(" simple type that do not have array"); - ConstrainResult::Same - } - - TypeKind::Array(t, _) => { - let inner_ty = - self.ctx.resolve_type(t).canonical_type(self.ctx); - match *inner_ty.kind() { - TypeKind::TypeParam => { - trace!(" Array with Named type has type parameter"); - self.insert(id) - } - _ => { - trace!( - " Array without Named type does have type parameter" - ); - ConstrainResult::Same - } - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - if self.has_type_parameter_in_array.contains(&t.into()) { - trace!( - " aliases and type refs to T which have array \ - also have array" - ); - self.insert(id) - } else { - trace!( - " aliases and type refs to T which do not have array \ - also do not have array" - ); - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - let bases_have = info.base_members().iter().any(|base| { - self.has_type_parameter_in_array.contains(&base.ty.into()) - }); - if bases_have { - trace!(" bases have array, so we also have"); - return self.insert(id); - } - let fields_have = info.fields().iter().any(|f| match *f { - Field::DataMember(ref data) => self - .has_type_parameter_in_array - .contains(&data.ty().into()), - Field::Bitfields(..) 
=> false, - }); - if fields_have { - trace!(" fields have array, so we also have"); - return self.insert(id); - } - - trace!(" comp doesn't have array"); - ConstrainResult::Same - } - - TypeKind::TemplateInstantiation(ref template) => { - let args_have = - template.template_arguments().iter().any(|arg| { - self.has_type_parameter_in_array.contains(&arg.into()) - }); - if args_have { - trace!( - " template args have array, so \ - insantiation also has array" - ); - return self.insert(id); - } - - let def_has = self - .has_type_parameter_in_array - .contains(&template.template_definition().into()); - if def_has { - trace!( - " template definition has array, so \ - insantiation also has" - ); - return self.insert(id); - } - - trace!(" template instantiation do not have array"); - ConstrainResult::Same - } - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self { - analysis.has_type_parameter_in_array - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_vtable.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_vtable.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_vtable.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/has_vtable.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,240 +0,0 @@ -//! 
Determining which types has vtable - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{Entry, HashMap}; -use std::cmp; -use std::ops; - -/// The result of the `HasVtableAnalysis` for an individual item. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum HasVtableResult { - /// The item does not have a vtable pointer. - No, - - /// The item has a vtable and the actual vtable pointer is within this item. - SelfHasVtable, - - /// The item has a vtable, but the actual vtable pointer is in a base - /// member. - BaseHasVtable, -} - -impl Default for HasVtableResult { - fn default() -> Self { - HasVtableResult::No - } -} - -impl HasVtableResult { - /// Take the least upper bound of `self` and `rhs`. - pub fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for HasVtableResult { - type Output = Self; - - fn bitor(self, rhs: HasVtableResult) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for HasVtableResult { - fn bitor_assign(&mut self, rhs: HasVtableResult) { - *self = self.join(rhs) - } -} - -/// An analysis that finds for each IR item whether it has vtable or not -/// -/// We use the monotone function `has vtable`, defined as follows: -/// -/// * If T is a type alias, a templated alias, an indirection to another type, -/// or a reference of a type, T has vtable if the type T refers to has vtable. -/// * If T is a compound type, T has vtable if we saw a virtual function when -/// parsing it or any of its base member has vtable. -/// * If T is an instantiation of an abstract template definition, T has -/// vtable if template definition has vtable -#[derive(Debug, Clone)] -pub struct HasVtableAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. 
Everything in this - // set definitely has a vtable. - have_vtable: HashMap, - - // Dependencies saying that if a key ItemId has been inserted into the - // `have_vtable` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has a vtable or not. - dependencies: HashMap>, -} - -impl<'ctx> HasVtableAnalysis<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type has a - // vtable or not. - matches!( - kind, - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::TemplateDeclaration - ) - } - - fn insert>( - &mut self, - id: Id, - result: HasVtableResult, - ) -> ConstrainResult { - if let HasVtableResult::No = result { - return ConstrainResult::Same; - } - - let id = id.into(); - match self.have_vtable.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < result { - entry.insert(result); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(result); - ConstrainResult::Changed - } - } - } - - fn forward(&mut self, from: Id1, to: Id2) -> ConstrainResult - where - Id1: Into, - Id2: Into, - { - let from = from.into(); - let to = to.into(); - - match self.have_vtable.get(&from).cloned() { - None => ConstrainResult::Same, - Some(r) => self.insert(to, r), - } - } -} - -impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> { - let have_vtable = HashMap::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasVtableAnalysis { - ctx, - have_vtable, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().cloned().collect() - } - - fn 
constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain {:?}", id); - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - None => return ConstrainResult::Same, - Some(ty) => ty, - }; - - // TODO #851: figure out a way to handle deriving from template type parameters. - match *ty.kind() { - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::ResolvedTypeRef(t) | - TypeKind::Reference(t) => { - trace!( - " aliases and references forward to their inner type" - ); - self.forward(t, id) - } - - TypeKind::Comp(ref info) => { - trace!(" comp considers its own methods and bases"); - let mut result = HasVtableResult::No; - - if info.has_own_virtual_method() { - trace!(" comp has its own virtual method"); - result |= HasVtableResult::SelfHasVtable; - } - - let bases_has_vtable = info.base_members().iter().any(|base| { - trace!(" comp has a base with a vtable: {:?}", base); - self.have_vtable.contains_key(&base.ty.into()) - }); - if bases_has_vtable { - result |= HasVtableResult::BaseHasVtable; - } - - self.insert(id, result) - } - - TypeKind::TemplateInstantiation(ref inst) => { - self.forward(inst.template_definition(), id) - } - - _ => ConstrainResult::Same, - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: HasVtableAnalysis<'ctx>) -> Self { - // We let the lack of an entry mean "No" to save space. - extra_assert!(analysis - .have_vtable - .values() - .all(|v| { *v != HasVtableResult::No })); - - analysis.have_vtable - } -} - -/// A convenience trait for the things for which we might wonder if they have a -/// vtable during codegen. 
-/// -/// This is not for _computing_ whether the thing has a vtable, it is for -/// looking up the results of the HasVtableAnalysis's computations for a -/// specific thing. -pub trait HasVtable { - /// Return `true` if this thing has vtable, `false` otherwise. - fn has_vtable(&self, ctx: &BindgenContext) -> bool; - - /// Return `true` if this thing has an actual vtable pointer in itself, as - /// opposed to transitively in a base member. - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool; -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/mod.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/mod.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/mod.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,398 +0,0 @@ -//! Fix-point analyses on the IR using the "monotone framework". -//! -//! A lattice is a set with a partial ordering between elements, where there is -//! a single least upper bound and a single greatest least bound for every -//! subset. We are dealing with finite lattices, which means that it has a -//! finite number of elements, and it follows that there exists a single top and -//! a single bottom member of the lattice. For example, the power set of a -//! finite set forms a finite lattice where partial ordering is defined by set -//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite -//! lattice constructed from the set {0,1,2}: -//! -//! ```text -//! .----- Top = {0,1,2} -----. -//! / | \ -//! / | \ -//! / | \ -//! {0,1} -------. {0,2} .--------- {1,2} -//! | \ / \ / | -//! | / \ | -//! | / \ / \ | -//! {0} --------' {1} `---------- {2} -//! \ | / -//! \ | / -//! \ | / -//! `------ Bottom = {} ------' -//! ``` -//! -//! A monotone function `f` is a function where if `x <= y`, then it holds that -//! `f(x) <= f(y)`. 
It should be clear that running a monotone function to a -//! fix-point on a finite lattice will always terminate: `f` can only "move" -//! along the lattice in a single direction, and therefore can only either find -//! a fix-point in the middle of the lattice or continue to the top or bottom -//! depending if it is ascending or descending the lattice respectively. -//! -//! For a deeper introduction to the general form of this kind of analysis, see -//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa]. -//! -//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf - -// Re-export individual analyses. -mod template_params; -pub use self::template_params::UsedTemplateParameters; -mod derive; -pub use self::derive::{as_cannot_derive_set, CannotDerive, DeriveTrait}; -mod has_vtable; -pub use self::has_vtable::{HasVtable, HasVtableAnalysis, HasVtableResult}; -mod has_destructor; -pub use self::has_destructor::HasDestructorAnalysis; -mod has_type_param_in_array; -pub use self::has_type_param_in_array::HasTypeParameterInArray; -mod has_float; -pub use self::has_float::HasFloat; -mod sizedness; -pub use self::sizedness::{Sizedness, SizednessAnalysis, SizednessResult}; - -use crate::ir::context::{BindgenContext, ItemId}; - -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::HashMap; -use std::fmt; -use std::ops; - -/// An analysis in the monotone framework. -/// -/// Implementors of this trait must maintain the following two invariants: -/// -/// 1. The concrete data must be a member of a finite-height lattice. -/// 2. The concrete `constrain` method must be monotone: that is, -/// if `x <= y`, then `constrain(x) <= constrain(y)`. -/// -/// If these invariants do not hold, iteration to a fix-point might never -/// complete. -/// -/// For a simple example analysis, see the `ReachableFrom` type in the `tests` -/// module below. -pub trait MonotoneFramework: Sized + fmt::Debug { - /// The type of node in our dependency graph. 
- /// - /// This is just generic (and not `ItemId`) so that we can easily unit test - /// without constructing real `Item`s and their `ItemId`s. - type Node: Copy; - - /// Any extra data that is needed during computation. - /// - /// Again, this is just generic (and not `&BindgenContext`) so that we can - /// easily unit test without constructing real `BindgenContext`s full of - /// real `Item`s and real `ItemId`s. - type Extra: Sized; - - /// The final output of this analysis. Once we have reached a fix-point, we - /// convert `self` into this type, and return it as the final result of the - /// analysis. - type Output: From + fmt::Debug; - - /// Construct a new instance of this analysis. - fn new(extra: Self::Extra) -> Self; - - /// Get the initial set of nodes from which to start the analysis. Unless - /// you are sure of some domain-specific knowledge, this should be the - /// complete set of nodes. - fn initial_worklist(&self) -> Vec; - - /// Update the analysis for the given node. - /// - /// If this results in changing our internal state (ie, we discovered that - /// we have not reached a fix-point and iteration should continue), return - /// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`. - /// When `constrain` returns `ConstrainResult::Same` for all nodes in the - /// set, we have reached a fix-point and the analysis is complete. - fn constrain(&mut self, node: Self::Node) -> ConstrainResult; - - /// For each node `d` that depends on the given `node`'s current answer when - /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to - /// queue up in the worklist when `constrain(node)` reports updated - /// information. - fn each_depending_on(&self, node: Self::Node, f: F) - where - F: FnMut(Self::Node); -} - -/// Whether an analysis's `constrain` function modified the incremental results -/// or not. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ConstrainResult { - /// The incremental results were updated, and the fix-point computation - /// should continue. - Changed, - - /// The incremental results were not updated. - Same, -} - -impl Default for ConstrainResult { - fn default() -> Self { - ConstrainResult::Same - } -} - -impl ops::BitOr for ConstrainResult { - type Output = Self; - - fn bitor(self, rhs: ConstrainResult) -> Self::Output { - if self == ConstrainResult::Changed || rhs == ConstrainResult::Changed { - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } -} - -impl ops::BitOrAssign for ConstrainResult { - fn bitor_assign(&mut self, rhs: ConstrainResult) { - *self = *self | rhs; - } -} - -/// Run an analysis in the monotone framework. -pub fn analyze(extra: Analysis::Extra) -> Analysis::Output -where - Analysis: MonotoneFramework, -{ - let mut analysis = Analysis::new(extra); - let mut worklist = analysis.initial_worklist(); - - while let Some(node) = worklist.pop() { - if let ConstrainResult::Changed = analysis.constrain(node) { - analysis.each_depending_on(node, |needs_work| { - worklist.push(needs_work); - }); - } - } - - analysis.into() -} - -/// Generate the dependency map for analysis -pub fn generate_dependencies( - ctx: &BindgenContext, - consider_edge: F, -) -> HashMap> -where - F: Fn(EdgeKind) -> bool, -{ - let mut dependencies = HashMap::default(); - - for &item in ctx.allowlisted_items() { - dependencies.entry(item).or_insert_with(Vec::new); - - { - // We reverse our natural IR graph edges to find dependencies - // between nodes. 
- item.trace( - ctx, - &mut |sub_item: ItemId, edge_kind| { - if ctx.allowlisted_items().contains(&sub_item) && - consider_edge(edge_kind) - { - dependencies - .entry(sub_item) - .or_insert_with(Vec::new) - .push(item); - } - }, - &(), - ); - } - } - dependencies -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{HashMap, HashSet}; - - // Here we find the set of nodes that are reachable from any given - // node. This is a lattice mapping nodes to subsets of all nodes. Our join - // function is set union. - // - // This is our test graph: - // - // +---+ +---+ - // | | | | - // | 1 | .----| 2 | - // | | | | | - // +---+ | +---+ - // | | ^ - // | | | - // | +---+ '------' - // '----->| | - // | 3 | - // .------| |------. - // | +---+ | - // | ^ | - // v | v - // +---+ | +---+ +---+ - // | | | | | | | - // | 4 | | | 5 |--->| 6 | - // | | | | | | | - // +---+ | +---+ +---+ - // | | | | - // | | | v - // | +---+ | +---+ - // | | | | | | - // '----->| 7 |<-----' | 8 | - // | | | | - // +---+ +---+ - // - // And here is the mapping from a node to the set of nodes that are - // reachable from it within the test graph: - // - // 1: {3,4,5,6,7,8} - // 2: {2} - // 3: {3,4,5,6,7,8} - // 4: {3,4,5,6,7,8} - // 5: {3,4,5,6,7,8} - // 6: {8} - // 7: {3,4,5,6,7,8} - // 8: {} - - #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] - struct Node(usize); - - #[derive(Clone, Debug, Default, PartialEq, Eq)] - struct Graph(HashMap>); - - impl Graph { - fn make_test_graph() -> Graph { - let mut g = Graph::default(); - g.0.insert(Node(1), vec![Node(3)]); - g.0.insert(Node(2), vec![Node(2)]); - g.0.insert(Node(3), vec![Node(4), Node(5)]); - g.0.insert(Node(4), vec![Node(7)]); - g.0.insert(Node(5), vec![Node(6), Node(7)]); - g.0.insert(Node(6), vec![Node(8)]); - g.0.insert(Node(7), vec![Node(3)]); - g.0.insert(Node(8), vec![]); - g - } - - fn reverse(&self) -> Graph { - let mut reversed = Graph::default(); - for (node, edges) in self.0.iter() { - 
reversed.0.entry(*node).or_insert(vec![]); - for referent in edges.iter() { - reversed.0.entry(*referent).or_insert(vec![]).push(*node); - } - } - reversed - } - } - - #[derive(Clone, Debug, PartialEq, Eq)] - struct ReachableFrom<'a> { - reachable: HashMap>, - graph: &'a Graph, - reversed: Graph, - } - - impl<'a> MonotoneFramework for ReachableFrom<'a> { - type Node = Node; - type Extra = &'a Graph; - type Output = HashMap>; - - fn new(graph: &'a Graph) -> ReachableFrom { - let reversed = graph.reverse(); - ReachableFrom { - reachable: Default::default(), - graph: graph, - reversed: reversed, - } - } - - fn initial_worklist(&self) -> Vec { - self.graph.0.keys().cloned().collect() - } - - fn constrain(&mut self, node: Node) -> ConstrainResult { - // The set of nodes reachable from a node `x` is - // - // reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ... - // - // where there exist edges from `x` to each of `s_0, s_1, ...`. - // - // Yes, what follows is a **terribly** inefficient set union - // implementation. Don't copy this code outside of this test! 
- - let original_size = self - .reachable - .entry(node) - .or_insert(HashSet::default()) - .len(); - - for sub_node in self.graph.0[&node].iter() { - self.reachable.get_mut(&node).unwrap().insert(*sub_node); - - let sub_reachable = self - .reachable - .entry(*sub_node) - .or_insert(HashSet::default()) - .clone(); - - for transitive in sub_reachable { - self.reachable.get_mut(&node).unwrap().insert(transitive); - } - } - - let new_size = self.reachable[&node].len(); - if original_size != new_size { - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - - fn each_depending_on(&self, node: Node, mut f: F) - where - F: FnMut(Node), - { - for dep in self.reversed.0[&node].iter() { - f(*dep); - } - } - } - - impl<'a> From> for HashMap> { - fn from(reachable: ReachableFrom<'a>) -> Self { - reachable.reachable - } - } - - #[test] - fn monotone() { - let g = Graph::make_test_graph(); - let reachable = analyze::(&g); - println!("reachable = {:#?}", reachable); - - fn nodes(nodes: A) -> HashSet - where - A: AsRef<[usize]>, - { - nodes.as_ref().iter().cloned().map(Node).collect() - } - - let mut expected = HashMap::default(); - expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(2), nodes([2])); - expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(6), nodes([8])); - expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(8), nodes([])); - println!("expected = {:#?}", expected); - - assert_eq!(reachable, expected); - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/sizedness.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/sizedness.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/sizedness.rs 2023-02-13 06:00:43.000000000 +0000 +++ 
clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/sizedness.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,361 +0,0 @@ -//! Determining the sizedness of types (as base classes and otherwise). - -use super::{ - generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework, -}; -use crate::ir::context::{BindgenContext, TypeId}; -use crate::ir::item::IsOpaque; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{Entry, HashMap}; -use std::{cmp, ops}; - -/// The result of the `Sizedness` analysis for an individual item. -/// -/// This is a chain lattice of the form: -/// -/// ```ignore -/// NonZeroSized -/// | -/// DependsOnTypeParam -/// | -/// ZeroSized -/// ``` -/// -/// We initially assume that all types are `ZeroSized` and then update our -/// understanding as we learn more about each type. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum SizednessResult { - /// The type is zero-sized. - /// - /// This means that if it is a C++ type, and is not being used as a base - /// member, then we must add an `_address` byte to enforce the - /// unique-address-per-distinct-object-instance rule. - ZeroSized, - - /// Whether this type is zero-sized or not depends on whether a type - /// parameter is zero-sized or not. - /// - /// For example, given these definitions: - /// - /// ```c++ - /// template - /// class Flongo : public T {}; - /// - /// class Empty {}; - /// - /// class NonEmpty { int x; }; - /// ``` - /// - /// Then `Flongo` is zero-sized, and needs an `_address` byte - /// inserted, while `Flongo` is *not* zero-sized, and should *not* - /// have an `_address` byte inserted. - /// - /// We don't properly handle this situation correctly right now: - /// https://github.com/rust-lang/rust-bindgen/issues/586 - DependsOnTypeParam, - - /// Has some size that is known to be greater than zero. That doesn't mean - /// it has a static size, but it is not zero sized for sure. 
In other words, - /// it might contain an incomplete array or some other dynamically sized - /// type. - NonZeroSized, -} - -impl Default for SizednessResult { - fn default() -> Self { - SizednessResult::ZeroSized - } -} - -impl SizednessResult { - /// Take the least upper bound of `self` and `rhs`. - pub fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for SizednessResult { - type Output = Self; - - fn bitor(self, rhs: SizednessResult) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for SizednessResult { - fn bitor_assign(&mut self, rhs: SizednessResult) { - *self = self.join(rhs) - } -} - -/// An analysis that computes the sizedness of all types. -/// -/// * For types with known sizes -- for example pointers, scalars, etc... -- -/// they are assigned `NonZeroSized`. -/// -/// * For compound structure types with one or more fields, they are assigned -/// `NonZeroSized`. -/// -/// * For compound structure types without any fields, the results of the bases -/// are `join`ed. -/// -/// * For type parameters, `DependsOnTypeParam` is assigned. -#[derive(Debug)] -pub struct SizednessAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - dependencies: HashMap>, - // Incremental results of the analysis. Missing entries are implicitly - // considered `ZeroSized`. - sized: HashMap, -} - -impl<'ctx> SizednessAnalysis<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type is - // zero-sized or not. - matches!( - kind, - EdgeKind::TemplateArgument | - EdgeKind::TemplateParameterDefinition | - EdgeKind::TemplateDeclaration | - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::Field - ) - } - - /// Insert an incremental result, and return whether this updated our - /// knowledge of types and we should continue the analysis. 
- fn insert( - &mut self, - id: TypeId, - result: SizednessResult, - ) -> ConstrainResult { - trace!("inserting {:?} for {:?}", result, id); - - if let SizednessResult::ZeroSized = result { - return ConstrainResult::Same; - } - - match self.sized.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < result { - entry.insert(result); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(result); - ConstrainResult::Changed - } - } - } - - fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult { - match self.sized.get(&from).cloned() { - None => ConstrainResult::Same, - Some(r) => self.insert(to, r), - } - } -} - -impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> { - type Node = TypeId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> { - let dependencies = generate_dependencies(ctx, Self::consider_edge) - .into_iter() - .filter_map(|(id, sub_ids)| { - id.as_type_id(ctx).map(|id| { - ( - id, - sub_ids - .into_iter() - .filter_map(|s| s.as_type_id(ctx)) - .collect::>(), - ) - }) - }) - .collect(); - - let sized = HashMap::default(); - - SizednessAnalysis { - ctx, - dependencies, - sized, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx - .allowlisted_items() - .iter() - .cloned() - .filter_map(|id| id.as_type_id(self.ctx)) - .collect() - } - - fn constrain(&mut self, id: TypeId) -> ConstrainResult { - trace!("constrain {:?}", id); - - if let Some(SizednessResult::NonZeroSized) = - self.sized.get(&id).cloned() - { - trace!(" already know it is not zero-sized"); - return ConstrainResult::Same; - } - - if id.has_vtable_ptr(self.ctx) { - trace!(" has an explicit vtable pointer, therefore is not zero-sized"); - return self.insert(id, SizednessResult::NonZeroSized); - } - - let ty = self.ctx.resolve_type(id); - - if id.is_opaque(self.ctx, &()) { - trace!(" type is opaque; checking layout..."); - let 
result = - ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| { - if l.size == 0 { - trace!(" ...layout has size == 0"); - SizednessResult::ZeroSized - } else { - trace!(" ...layout has size > 0"); - SizednessResult::NonZeroSized - } - }); - return self.insert(id, result); - } - - match *ty.kind() { - TypeKind::Void => { - trace!(" void is zero-sized"); - self.insert(id, SizednessResult::ZeroSized) - } - - TypeKind::TypeParam => { - trace!( - " type params sizedness depends on what they're \ - instantiated as" - ); - self.insert(id, SizednessResult::DependsOnTypeParam) - } - - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::NullPtr | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::Pointer(..) => { - trace!(" {:?} is known not to be zero-sized", ty.kind()); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::ObjCInterface(..) => { - trace!(" obj-c interfaces always have at least the `isa` pointer"); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) | - TypeKind::ResolvedTypeRef(t) => { - trace!(" aliases and type refs forward to their inner type"); - self.forward(t, id) - } - - TypeKind::TemplateInstantiation(ref inst) => { - trace!( - " template instantiations are zero-sized if their \ - definition is zero-sized" - ); - self.forward(inst.template_definition(), id) - } - - TypeKind::Array(_, 0) => { - trace!(" arrays of zero elements are zero-sized"); - self.insert(id, SizednessResult::ZeroSized) - } - TypeKind::Array(..) => { - trace!(" arrays of > 0 elements are not zero-sized"); - self.insert(id, SizednessResult::NonZeroSized) - } - TypeKind::Vector(..) 
=> { - trace!(" vectors are not zero-sized"); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::Comp(ref info) => { - trace!(" comp considers its own fields and bases"); - - if !info.fields().is_empty() { - return self.insert(id, SizednessResult::NonZeroSized); - } - - let result = info - .base_members() - .iter() - .filter_map(|base| self.sized.get(&base.ty)) - .fold(SizednessResult::ZeroSized, |a, b| a.join(*b)); - - self.insert(id, result) - } - - TypeKind::Opaque => { - unreachable!("covered by the .is_opaque() check above") - } - - TypeKind::UnresolvedTypeRef(..) => { - unreachable!("Should have been resolved after parsing!"); - } - } - } - - fn each_depending_on(&self, id: TypeId, mut f: F) - where - F: FnMut(TypeId), - { - if let Some(edges) = self.dependencies.get(&id) { - for ty in edges { - trace!("enqueue {:?} into worklist", ty); - f(*ty); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: SizednessAnalysis<'ctx>) -> Self { - // We let the lack of an entry mean "ZeroSized" to save space. - extra_assert!(analysis - .sized - .values() - .all(|v| { *v != SizednessResult::ZeroSized })); - - analysis.sized - } -} - -/// A convenience trait for querying whether some type or id is sized. -/// -/// This is not for _computing_ whether the thing is sized, it is for looking up -/// the results of the `Sizedness` analysis's computations for a specific thing. -pub trait Sizedness { - /// Get the sizedness of this type. - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult; - - /// Is the sizedness for this type `SizednessResult::ZeroSized`? 
- fn is_zero_sized(&self, ctx: &BindgenContext) -> bool { - self.sizedness(ctx) == SizednessResult::ZeroSized - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/template_params.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/template_params.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/template_params.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/analysis/template_params.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,608 +0,0 @@ -//! Discover which template type parameters are actually used. -//! -//! ### Why do we care? -//! -//! C++ allows ignoring template parameters, while Rust does not. Usually we can -//! blindly stick a `PhantomData` inside a generic Rust struct to make up for -//! this. That doesn't work for templated type aliases, however: -//! -//! ```C++ -//! template -//! using Fml = int; -//! ``` -//! -//! If we generate the naive Rust code for this alias, we get: -//! -//! ```ignore -//! pub type Fml = ::std::os::raw::int; -//! ``` -//! -//! And this is rejected by `rustc` due to the unused type parameter. -//! -//! (Aside: in these simple cases, `libclang` will often just give us the -//! aliased type directly, and we will never even know we were dealing with -//! aliases, let alone templated aliases. It's the more convoluted scenarios -//! where we get to have some fun...) -//! -//! For such problematic template aliases, we could generate a tuple whose -//! second member is a `PhantomData`. Or, if we wanted to go the extra mile, -//! we could even generate some smarter wrapper that implements `Deref`, -//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased -//! type. However, this is still lackluster: -//! -//! 1. Even with a billion conversion-trait implementations, using the generated -//! bindings is rather un-ergonomic. -//! 2. 
With either of these solutions, we need to keep track of which aliases -//! we've transformed like this in order to generate correct uses of the -//! wrapped type. -//! -//! Given that we have to properly track which template parameters ended up used -//! for (2), we might as well leverage that information to make ergonomic -//! bindings that don't contain any unused type parameters at all, and -//! completely avoid the pain of (1). -//! -//! ### How do we determine which template parameters are used? -//! -//! Determining which template parameters are actually used is a trickier -//! problem than it might seem at a glance. On the one hand, trivial uses are -//! easy to detect: -//! -//! ```C++ -//! template -//! class Foo { -//! T trivial_use_of_t; -//! }; -//! ``` -//! -//! It gets harder when determining if one template parameter is used depends on -//! determining if another template parameter is used. In this example, whether -//! `U` is used depends on whether `T` is used. -//! -//! ```C++ -//! template -//! class DoesntUseT { -//! int x; -//! }; -//! -//! template -//! class Fml { -//! DoesntUseT lololol; -//! }; -//! ``` -//! -//! We can express the set of used template parameters as a constraint solving -//! problem (where the set of template parameters used by a given IR item is the -//! union of its sub-item's used template parameters) and iterate to a -//! fixed-point. -//! -//! We use the `ir::analysis::MonotoneFramework` infrastructure for this -//! fix-point analysis, where our lattice is the mapping from each IR item to -//! the powerset of the template parameters that appear in the input C++ header, -//! our join function is set union. The set of template parameters appearing in -//! the program is finite, as is the number of IR items. We start at our -//! lattice's bottom element: every item mapping to an empty set of template -//! parameters. Our analysis only adds members to each item's set of used -//! 
template parameters, never removes them, so it is monotone. Because our -//! lattice is finite and our constraint function is monotone, iteration to a -//! fix-point will terminate. -//! -//! See `src/ir/analysis.rs` for more. - -use super::{ConstrainResult, MonotoneFramework}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::item::{Item, ItemSet}; -use crate::ir::template::{TemplateInstantiation, TemplateParameters}; -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item its set of template parameters that -/// it uses. -/// -/// We use the monotone constraint function `template_param_usage`, defined as -/// follows: -/// -/// * If `T` is a named template type parameter, it trivially uses itself: -/// -/// ```ignore -/// template_param_usage(T) = { T } -/// ``` -/// -/// * If `inst` is a template instantiation, `inst.args` are the template -/// instantiation's template arguments, `inst.def` is the template definition -/// being instantiated, and `inst.def.params` is the template definition's -/// template parameters, then the instantiation's usage is the union of each -/// of its arguments' usages *if* the corresponding template parameter is in -/// turn used by the template definition: -/// -/// ```ignore -/// template_param_usage(inst) = union( -/// template_param_usage(inst.args[i]) -/// for i in 0..length(inst.args.length) -/// if inst.def.params[i] in template_param_usage(inst.def) -/// ) -/// ``` -/// -/// * Finally, for all other IR item kinds, we use our lattice's `join` -/// operation: set union with each successor of the given item's template -/// parameter usage: -/// -/// ```ignore -/// template_param_usage(v) = -/// union(template_param_usage(w) for w in successors(v)) -/// ``` -/// -/// Note that we ignore certain edges in the graph, such as edges from a -/// template declaration to its template parameters' definitions for this -/// 
analysis. If we didn't, then we would mistakenly determine that ever -/// template parameter is always used. -/// -/// The final wrinkle is handling of blocklisted types. Normally, we say that -/// the set of allowlisted items is the transitive closure of items explicitly -/// called out for allowlisting, *without* any items explicitly called out as -/// blocklisted. However, for the purposes of this analysis's correctness, we -/// simplify and consider run the analysis on the full transitive closure of -/// allowlisted items. We do, however, treat instantiations of blocklisted items -/// specially; see `constrain_instantiation_of_blocklisted_template` and its -/// documentation for details. -#[derive(Debug, Clone)] -pub struct UsedTemplateParameters<'ctx> { - ctx: &'ctx BindgenContext, - - // The Option is only there for temporary moves out of the hash map. See the - // comments in `UsedTemplateParameters::constrain` below. - used: HashMap>, - - dependencies: HashMap>, - - // The set of allowlisted items, without any blocklisted items reachable - // from the allowlisted items which would otherwise be considered - // allowlisted as well. - allowlisted_items: HashSet, -} - -impl<'ctx> UsedTemplateParameters<'ctx> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - // For each of these kinds of edges, if the referent uses a template - // parameter, then it should be considered that the origin of the - // edge also uses the template parameter. - EdgeKind::TemplateArgument | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::VarType | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::TypeReference => true, - - // An inner var or type using a template parameter is orthogonal - // from whether we use it. See template-param-usage-{6,11}.hpp. 
- EdgeKind::InnerVar | EdgeKind::InnerType => false, - - // We can't emit machine code for new monomorphizations of class - // templates' methods (and don't detect explicit instantiations) so - // we must ignore template parameters that are only used by - // methods. This doesn't apply to a function type's return or - // parameter types, however, because of type aliases of function - // pointers that use template parameters, eg - // tests/headers/struct_with_typedef_template_arg.hpp - EdgeKind::Method => false, - - // If we considered these edges, we would end up mistakenly claiming - // that every template parameter always used. - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => false, - - // Since we have to be careful about which edges we consider for - // this analysis to be correct, we ignore generic edges. We also - // avoid a `_` wild card to force authors of new edge kinds to - // determine whether they need to be considered by this analysis. - EdgeKind::Generic => false, - } - } - - fn take_this_id_usage_set>( - &mut self, - this_id: Id, - ) -> ItemSet { - let this_id = this_id.into(); - self.used - .get_mut(&this_id) - .expect( - "Should have a set of used template params for every item \ - id", - ) - .take() - .expect( - "Should maintain the invariant that all used template param \ - sets are `Some` upon entry of `constrain`", - ) - } - - /// We say that blocklisted items use all of their template parameters. The - /// blocklisted type is most likely implemented explicitly by the user, - /// since it won't be in the generated bindings, and we don't know exactly - /// what they'll to with template parameters, but we can push the issue down - /// the line to them. 
- fn constrain_instantiation_of_blocklisted_template( - &self, - this_id: ItemId, - used_by_this_id: &mut ItemSet, - instantiation: &TemplateInstantiation, - ) { - trace!( - " instantiation of blocklisted template, uses all template \ - arguments" - ); - - let args = instantiation - .template_arguments() - .iter() - .map(|a| { - a.into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self.ctx) - .id() - }) - .filter(|a| *a != this_id) - .flat_map(|a| { - self.used - .get(&a) - .expect("Should have a used entry for the template arg") - .as_ref() - .expect( - "Because a != this_id, and all used template \ - param sets other than this_id's are `Some`, \ - a's used template param set should be `Some`", - ) - .iter() - .cloned() - }); - - used_by_this_id.extend(args); - } - - /// A template instantiation's concrete template argument is only used if - /// the template definition uses the corresponding template parameter. - fn constrain_instantiation( - &self, - this_id: ItemId, - used_by_this_id: &mut ItemSet, - instantiation: &TemplateInstantiation, - ) { - trace!(" template instantiation"); - - let decl = self.ctx.resolve_type(instantiation.template_definition()); - let args = instantiation.template_arguments(); - - let params = decl.self_template_params(self.ctx); - - debug_assert!(this_id != instantiation.template_definition()); - let used_by_def = self.used - .get(&instantiation.template_definition().into()) - .expect("Should have a used entry for instantiation's template definition") - .as_ref() - .expect("And it should be Some because only this_id's set is None, and an \ - instantiation's template definition should never be the \ - instantiation itself"); - - for (arg, param) in args.iter().zip(params.iter()) { - trace!( - " instantiation's argument {:?} is used if definition's \ - parameter {:?} is used", - arg, - param - ); - - if used_by_def.contains(¶m.into()) { - trace!(" param is used by template definition"); - - let arg = arg - 
.into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self.ctx) - .id(); - - if arg == this_id { - continue; - } - - let used_by_arg = self - .used - .get(&arg) - .expect("Should have a used entry for the template arg") - .as_ref() - .expect( - "Because arg != this_id, and all used template \ - param sets other than this_id's are `Some`, \ - arg's used template param set should be \ - `Some`", - ) - .iter() - .cloned(); - used_by_this_id.extend(used_by_arg); - } - } - } - - /// The join operation on our lattice: the set union of all of this id's - /// successors. - fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) { - trace!(" other item: join with successors' usage"); - - item.trace( - self.ctx, - &mut |sub_id, edge_kind| { - // Ignore ourselves, since union with ourself is a - // no-op. Ignore edges that aren't relevant to the - // analysis. - if sub_id == item.id() || !Self::consider_edge(edge_kind) { - return; - } - - let used_by_sub_id = self - .used - .get(&sub_id) - .expect("Should have a used set for the sub_id successor") - .as_ref() - .expect( - "Because sub_id != id, and all used template \ - param sets other than id's are `Some`, \ - sub_id's used template param set should be \ - `Some`", - ) - .iter() - .cloned(); - - trace!( - " union with {:?}'s usage: {:?}", - sub_id, - used_by_sub_id.clone().collect::>() - ); - - used_by_this_id.extend(used_by_sub_id); - }, - &(), - ); - } -} - -impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> { - let mut used = HashMap::default(); - let mut dependencies = HashMap::default(); - let allowlisted_items: HashSet<_> = - ctx.allowlisted_items().iter().cloned().collect(); - - let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items - .iter() - .cloned() - .flat_map(|i| { - let mut reachable = vec![i]; - 
i.trace( - ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect(); - - for item in allowlisted_and_blocklisted_items { - dependencies.entry(item).or_insert_with(Vec::new); - used.entry(item).or_insert_with(|| Some(ItemSet::new())); - - { - // We reverse our natural IR graph edges to find dependencies - // between nodes. - item.trace( - ctx, - &mut |sub_item: ItemId, _| { - used.entry(sub_item) - .or_insert_with(|| Some(ItemSet::new())); - dependencies - .entry(sub_item) - .or_insert_with(Vec::new) - .push(item); - }, - &(), - ); - } - - // Additionally, whether a template instantiation's template - // arguments are used depends on whether the template declaration's - // generic template parameters are used. - let item_kind = - ctx.resolve_item(item).as_type().map(|ty| ty.kind()); - if let Some(&TypeKind::TemplateInstantiation(ref inst)) = item_kind - { - let decl = ctx.resolve_type(inst.template_definition()); - let args = inst.template_arguments(); - - // Although template definitions should always have - // template parameters, there is a single exception: - // opaque templates. Hence the unwrap_or. - let params = decl.self_template_params(ctx); - - for (arg, param) in args.iter().zip(params.iter()) { - let arg = arg - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .id(); - - let param = param - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .id(); - - used.entry(arg).or_insert_with(|| Some(ItemSet::new())); - used.entry(param).or_insert_with(|| Some(ItemSet::new())); - - dependencies - .entry(arg) - .or_insert_with(Vec::new) - .push(param); - } - } - } - - if cfg!(feature = "testing_only_extra_assertions") { - // Invariant: The `used` map has an entry for every allowlisted - // item, as well as all explicitly blocklisted items that are - // reachable from allowlisted items. 
- // - // Invariant: the `dependencies` map has an entry for every - // allowlisted item. - // - // (This is so that every item we call `constrain` on is guaranteed - // to have a set of template parameters, and we can allow - // blocklisted templates to use all of their parameters). - for item in allowlisted_items.iter() { - extra_assert!(used.contains_key(item)); - extra_assert!(dependencies.contains_key(item)); - item.trace( - ctx, - &mut |sub_item, _| { - extra_assert!(used.contains_key(&sub_item)); - extra_assert!(dependencies.contains_key(&sub_item)); - }, - &(), - ) - } - } - - UsedTemplateParameters { - ctx, - used, - dependencies, - allowlisted_items, - } - } - - fn initial_worklist(&self) -> Vec { - // The transitive closure of all allowlisted items, including explicitly - // blocklisted items. - self.ctx - .allowlisted_items() - .iter() - .cloned() - .flat_map(|i| { - let mut reachable = vec![i]; - i.trace( - self.ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - // Invariant: all hash map entries' values are `Some` upon entering and - // exiting this method. - extra_assert!(self.used.values().all(|v| v.is_some())); - - // Take the set for this id out of the hash map while we mutate it based - // on other hash map entries. We *must* put it back into the hash map at - // the end of this method. This allows us to side-step HashMap's lack of - // an analog to slice::split_at_mut. - let mut used_by_this_id = self.take_this_id_usage_set(id); - - trace!("constrain {:?}", id); - trace!(" initially, used set is {:?}", used_by_this_id); - - let original_len = used_by_this_id.len(); - - let item = self.ctx.resolve_item(id); - let ty_kind = item.as_type().map(|ty| ty.kind()); - match ty_kind { - // Named template type parameters trivially use themselves. 
- Some(&TypeKind::TypeParam) => { - trace!(" named type, trivially uses itself"); - used_by_this_id.insert(id); - } - // Template instantiations only use their template arguments if the - // template definition uses the corresponding template parameter. - Some(&TypeKind::TemplateInstantiation(ref inst)) => { - if self - .allowlisted_items - .contains(&inst.template_definition().into()) - { - self.constrain_instantiation( - id, - &mut used_by_this_id, - inst, - ); - } else { - self.constrain_instantiation_of_blocklisted_template( - id, - &mut used_by_this_id, - inst, - ); - } - } - // Otherwise, add the union of each of its referent item's template - // parameter usage. - _ => self.constrain_join(&mut used_by_this_id, item), - } - - trace!(" finally, used set is {:?}", used_by_this_id); - - let new_len = used_by_this_id.len(); - assert!( - new_len >= original_len, - "This is the property that ensures this function is monotone -- \ - if it doesn't hold, the analysis might never terminate!" - ); - - // Put the set back in the hash map and restore our invariant. 
- debug_assert!(self.used[&id].is_none()); - self.used.insert(id, Some(used_by_this_id)); - extra_assert!(self.used.values().all(|v| v.is_some())); - - if new_len != original_len { - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - - fn each_depending_on(&self, item: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&item) { - for item in edges { - trace!("enqueue {:?} into worklist", item); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self { - used_templ_params - .used - .into_iter() - .map(|(k, v)| (k, v.unwrap())) - .collect() - } -} diff -Nru clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/annotations.rs clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/annotations.rs --- clamav-1.0.1+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/annotations.rs 2023-02-13 06:00:43.000000000 +0000 +++ clamav-1.0.2+dfsg/libclamav_rust/.cargo/vendor/bindgen/src/ir/annotations.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -//! Types and functions related to bindgen annotation comments. -//! -//! Users can add annotations in doc comments to types that they would like to -//! replace other types with, mark as opaque, etc. This module deals with all of -//! that stuff. - -use crate::clang; - -/// What kind of accessor should we provide for a field? -#[derive(Copy, PartialEq, Clone, Debug)] -pub enum FieldAccessorKind { - /// No accessor. - None, - /// Plain accessor. - Regular, - /// Unsafe accessor. - Unsafe, - /// Immutable accessor. - Immutable, -} - -/// Annotations for a given item, or a field. -/// -/// You can see the kind of comments that are accepted in the Doxygen -/// documentation: -/// -/// http://www.stack.nl/~dimitri/doxygen/manual/docblocks.html -#[derive(Default, Clone, PartialEq, Debug)] -pub struct Annotations { - /// Whether this item is marked as opaque. Only applies to types. 
- opaque: bool, - /// Whether this item should be hidden from the output. Only applies to - /// types, or enum variants. - hide: bool, - /// Whether this type should be replaced by another. The name is a - /// namespace-aware path. - use_instead_of: Option>, - /// Manually disable deriving copy/clone on this type. Only applies to - /// struct or union types. - disallow_copy: bool, - /// Manually disable deriving debug on this type. - disallow_debug: bool, - /// Manually disable deriving/implement default on this type. - disallow_default: bool, - /// Whether to add a #[must_use] annotation to this type. - must_use_type: bool, - /// Whether fields should be marked as private or not. You can set this on - /// structs (it will apply to all the fields), or individual fields. - private_fields: Option, - /// The kind of accessor this field will have. Also can be applied to - /// structs so all the fields inside share it by default. - accessor_kind: Option, - /// Whether this enum variant should be constified. - /// - /// This is controlled by the `constant` attribute, this way: - /// - /// ```cpp - /// enum Foo { - /// Bar = 0, /**<