diff -Nru bit-babbler-0.7/configure bit-babbler-0.8/configure --- bit-babbler-0.7/configure 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/configure 2018-02-07 23:56:52.000000000 +0000 @@ -1,7 +1,7 @@ #! /bin/sh -# From configure.ac generated by Makeup 0.31. +# From configure.ac generated by Makeup 0.32. # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for bit-babbler 0.7. +# Generated by GNU Autoconf 2.69 for bit-babbler 0.8. # # Report bugs to . # @@ -12,7 +12,7 @@ # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. # -# Copyright (C) 2003 - 2008, Ron +# Copyright (C) 2003 - 2018, Ron ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## @@ -583,8 +583,8 @@ # Identity of this package. PACKAGE_NAME='bit-babbler' PACKAGE_TARNAME='bit-babbler' -PACKAGE_VERSION='0.7' -PACKAGE_STRING='bit-babbler 0.7' +PACKAGE_VERSION='0.8' +PACKAGE_STRING='bit-babbler 0.8' PACKAGE_BUGREPORT='ron@debian.org' PACKAGE_URL='' @@ -636,6 +636,7 @@ USB_CPPFLAGS LIBUSB_DIR THREAD_STACK_SIZE +SEEDD_CONTROL_SOCKET eg_path_iconv POSUB LTLIBINTL @@ -705,6 +706,10 @@ LDFLAGS CFLAGS CC +CXX_SEARCH +CC_SEARCH +CXX_STANDARD +C_STANDARD host_os host_vendor host_cpu @@ -778,6 +783,10 @@ ac_precious_vars='build_alias host_alias target_alias +C_STANDARD +CXX_STANDARD +CC_SEARCH +CXX_SEARCH CC CFLAGS LDFLAGS @@ -810,6 +819,7 @@ MSGINIT ALL_LINGUAS GETTEXT_MSG_SRC +SEEDD_CONTROL_SOCKET THREAD_STACK_SIZE LIBUSB_DIR USB_CPPFLAGS @@ -1356,7 +1366,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures bit-babbler 0.7 to adapt to many kinds of systems. +\`configure' configures bit-babbler 0.8 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1421,7 +1431,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of bit-babbler 0.7:";; + short | recursive ) echo "Configuration of bit-babbler 0.8:";; esac cat <<\_ACEOF @@ -1465,6 +1475,11 @@ Linux, else no) Some influential environment variables: + C_STANDARD flags to set the compiler C standard to use + CXX_STANDARD + flags to set the compiler C++ standard to use + CC_SEARCH space separated list of which C compiler to prefer + CXX_SEARCH space separated list of which C++ compiler to prefer CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a @@ -1509,6 +1524,8 @@ ALL_LINGUAS The list of supported ISO 639 language codes GETTEXT_MSG_SRC Limit the search for messages to $(GETTEXT_MSG_SRC)/ + SEEDD_CONTROL_SOCKET + Set the default to use for the seedd control socket THREAD_STACK_SIZE Explicitly set the per-thread stack size in kB (if non-zero) LIBUSB_DIR Path for libusb (mostly for cross-compiling) @@ -1586,14 +1603,14 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -bit-babbler configure 0.7 +bit-babbler configure 0.8 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. 
-Copyright (C) 2003 - 2008, Ron +Copyright (C) 2003 - 2018, Ron _ACEOF exit fi @@ -2166,7 +2183,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by bit-babbler $as_me 0.7, which was +It was created by bit-babbler $as_me 0.8, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2785,6 +2802,9 @@ +# Select the default language standard to use. +CXX_STANDARD="-std=gnu++98" + # Don't let these get set by AC_PROG_* below. @@ -2793,205 +2813,46 @@ CXXFLAGS=${CXXFLAGS-} -# Check standard tools. -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. 
-set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS +# Oddly enough, the most preferred compiler is a platform specific thing, not a +# universal truth. Who could have guessed ... +case $host in + *-*-openbsd* ) + if test -z "$CC_SEARCH"; then : + CC_SEARCH="clang gcc cc" fi + if test -z "$CXX_SEARCH"; then : + CXX_SEARCH="clang++ g++ c++" fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS + ;; -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. - shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi + * ) + if test -z "$CC_SEARCH"; then : + CC_SEARCH="gcc clang cc" fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + if test -z "$CXX_SEARCH"; then : + CXX_SEARCH="g++ clang++ c++" fi + ;; +esac -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe +# Check standard tools. + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in $CC_SEARCH do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 @@ -3035,7 +2896,7 @@ fi if test -z "$CC"; then ac_ct_CC=$CC - for ac_prog in cl.exe + for ac_prog in $CC_SEARCH do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 @@ -3090,8 +2951,6 @@ fi fi -fi - test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} @@ -3731,7 +3590,7 @@ CXX=$CCC else if test -n "$ac_tool_prefix"; then - for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + for ac_prog in $CXX_SEARCH do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 @@ -3775,7 +3634,7 @@ fi if test -z "$CXX"; then ac_ct_CXX=$CXX - for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + for ac_prog in $CXX_SEARCH do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 @@ -4112,6 +3971,13 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$C_STANDARD"; then : + CPP="$CPP $C_STANDARD" +fi +if test -n "$CXX_STANDARD"; then : + CXXCPP="$CXXCPP $CXX_STANDARD" +fi + for ac_prog in flex lex do @@ -4836,55 +4702,6 @@ -PTHREAD_CPPFLAGS="-pthread" -PTHREAD_LDFLAGS="-pthread" - -save_CPPFLAGS=$CPPFLAGS -CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is defined by the compiler" >&5 -$as_echo_n "checking if _REENTRANT is defined by the compiler... " >&6; } -if ${ac_cv_have_reentrant+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#ifndef _REENTRANT -#error "_REENTRANT was not defined" -#endif - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_cv_have_reentrant=yes -else - ac_cv_have_reentrant=no - -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_reentrant" >&5 -$as_echo "$ac_cv_have_reentrant" >&6; } - -CPPFLAGS=$save_CPPFLAGS - -if test "$ac_cv_have_reentrant" != "yes"; then : - - PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" - -fi - - # Check standard args. # Check whether --enable-pipe was given. @@ -5003,137 +4820,711 @@ -RC_SEP="#" +RC_SEP="#" + +case $host in + + *-*-linux* ) + + MAKEUP_HOST_ARCH="ELF" + + makeup_build_platform="linux" + + DSOEXT=".so" + + + if test "$ac_cv_enable_shared" = yes; then : + PICFLAGS="-fPIC" +fi + ;; + + *-*-*bsd* | *-*-darwin* ) + + MAKEUP_HOST_ARCH="ELF" + makeup_build_platform="bsd" + + DSOEXT=".so" + + if test "$ac_cv_enable_shared" = yes; then : + PICFLAGS="-fPIC" +fi + ;; + + *-*-cygwin* | *-*-mingw32* ) + + MAKEUP_HOST_ARCH="PE" + makeup_build_platform="msw" + + DSOEXT=".dll" + + if test "$ac_cv_enable_shared" = yes; then : + PICFLAGS="-D_DLL=1 -D_WINDLL=1" +fi + + + WINRCFLAGS="--include-dir /usr/$host_alias/include" + + if test -n "$ac_cv_with_wx_build_dir"; then : + + WINRCFLAGS="$WINRCFLAGS --include-dir $ac_cv_with_wx_build_dir/../include" + +fi + + WINRCFLAGS="$WINRCFLAGS --define __WIN32__ --define __WIN95__ --define __GNUWIN32__" + + RC_SEP= + ;; + * ) + as_fn_error $? "Unknown host type. Stopping." 
"$LINENO" 5 +esac + +if test "$ac_cv_enable_debug" = yes; then : + makeup_build_flavour=d +else + makeup_build_flavour=r +fi + +if test "$ac_cv_enable_shared" = yes; then : + + MAKEUP_DEFAULT_LINKAGE="shared" + +else + + MAKEUP_DEFAULT_LINKAGE="static" + +fi + + +cc_warnings=" -Wall" +cc_fail_on_warn=" -Werror" +cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wfloat-equal" + +cc_optimise=" -O2" +cc_profile=" -pg" +cc_debug=" -g" +cc_pipe=" -pipe" + +c_extra_warnings=" -Wstrict-prototypes -Wmissing-prototypes" + +cxx_extra_warnings=" -Woverloaded-virtual" + +cc_flags= +c_flags= +cxx_flags= + +if test "$ac_cv_enable_pipe" = yes; then : + cc_flags="$cc_flags$cc_pipe" +fi + +if test "$ac_cv_enable_optimisation" = yes; then : + cc_flags="$cc_flags$cc_optimise" +fi + +if test "$ac_cv_enable_debug" = yes; then : + cc_flags="$cc_flags$cc_debug" +fi + +if test "$ac_cv_enable_profiling" = yes; then : + + cc_flags="$cc_flags$cc_profile" + LDFLAGS="$LDFLAGS$cc_profile" + +fi + +cc_flags="$cc_flags$cc_warnings" + +if test "$ac_cv_enable_extra_warnings" = yes; then : + + cc_flags="$cc_flags$cc_extra_warnings" + c_flags="$c_flags$c_extra_warnings" + cxx_flags="$cxx_flags$cxx_extra_warnings" + +fi + +if test "$ac_cv_enable_fail_on_warning" = yes; then : + cc_flags="$cc_flags$cc_fail_on_warn" +fi + +CFLAGS=${CFLAGS:-$cc_flags$c_flags} +CXXFLAGS=${CXXFLAGS:-$cc_flags$cxx_flags} + +if test -n "$C_STANDARD"; then : + CFLAGS="$C_STANDARD $CFLAGS" +fi +if test -n "$CXX_STANDARD"; then : + CXXFLAGS="$CXX_STANDARD $CXXFLAGS" +fi + +# add 's' here and omit ranlib from the build step +ARFLAGS=rDvs + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +acm_save_CFLAGS="$CFLAGS" +CFLAGS="$CFLAGS -Womg-wtf-not-an-option" + +ACM_C_WARNINGFAIL="" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC unknown warning options are errors" >&5 +$as_echo_n "checking if $CC unknown warning options are errors... " >&6; } +if ${mu_cv_C_flag_uwo+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_uwo=no +else + mu_cv_C_flag_uwo=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_uwo" >&5 +$as_echo "$mu_cv_C_flag_uwo" >&6; } +if test "$mu_cv_C_flag_uwo" = no; then : + + CFLAGS="$acm_save_CFLAGS -Werror=unknown-warning-option -Womg-wtf-not-an-option" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Werror=unknown-warning-option" >&5 +$as_echo_n "checking if $CC supports -Werror=unknown-warning-option... " >&6; } +if ${mu_cv_C_flag_werror_uwo+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_werror_uwo=no +else + mu_cv_C_flag_werror_uwo=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_werror_uwo" >&5 +$as_echo "$mu_cv_C_flag_werror_uwo" >&6; } + if test "$mu_cv_C_flag_werror_uwo" = yes; then : + ACM_C_WARNINGFAIL=" -Werror=unknown-warning-option" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Don't know how to make $CC fail with unknown warning options," >&5 +$as_echo "$as_me: WARNING: Don't know how to make $CC fail with unknown warning options," >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&5 +$as_echo "$as_me: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&2;} +fi + +fi + +CFLAGS="$acm_save_CFLAGS" + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +acm_save_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS -Womg-wtf-not-an-option" + +ACM_CXX_WARNINGFAIL="" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX unknown warning options are errors" >&5 +$as_echo_n "checking if $CXX unknown warning options are errors... " >&6; } +if ${mu_cv_CXX_flag_uwo+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_uwo=no +else + mu_cv_CXX_flag_uwo=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_uwo" >&5 +$as_echo "$mu_cv_CXX_flag_uwo" >&6; } +if test "$mu_cv_CXX_flag_uwo" = no; then : + + CXXFLAGS="$acm_save_CXXFLAGS -Werror=unknown-warning-option -Womg-wtf-not-an-option" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=unknown-warning-option" >&5 +$as_echo_n "checking if $CXX supports -Werror=unknown-warning-option... " >&6; } +if ${mu_cv_CXX_flag_werror_uwo+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_werror_uwo=no +else + mu_cv_CXX_flag_werror_uwo=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_werror_uwo" >&5 +$as_echo "$mu_cv_CXX_flag_werror_uwo" >&6; } + if test "$mu_cv_CXX_flag_werror_uwo" = yes; then : + ACM_CXX_WARNINGFAIL=" -Werror=unknown-warning-option" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Don't know how to make $CXX fail with unknown warning options," >&5 +$as_echo "$as_me: WARNING: Don't know how to make $CXX fail with unknown warning options," >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: so later tests may (wrongly) decide to pass them to it anyway." 
>&5 +$as_echo "$as_me: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&2;} +fi + +fi + +CXXFLAGS="$acm_save_CXXFLAGS" + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + +if test "$ac_cv_enable_extra_warnings" = yes; then : + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + +acm_save_CFLAGS="$CFLAGS" +CFLAGS="$CFLAGS$ACM_C_WARNINGFAIL -Wsuggest-attribute=format" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=format" >&5 +$as_echo_n "checking if $CC supports -Wsuggest-attribute=format... " >&6; } +if ${mu_cv_C_flag_suggest_attribute_format+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_suggest_attribute_format=yes +else + mu_cv_C_flag_suggest_attribute_format=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_suggest_attribute_format" >&5 +$as_echo "$mu_cv_C_flag_suggest_attribute_format" >&6; } +if test "$mu_cv_C_flag_suggest_attribute_format" = yes; then : + CFLAGS="$acm_save_CFLAGS -Wsuggest-attribute=format" +else + CFLAGS="$acm_save_CFLAGS" +fi + +acm_save_CFLAGS="$CFLAGS" +CFLAGS="$CFLAGS$ACM_C_WARNINGFAIL -Wsuggest-attribute=const" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=const" >&5 +$as_echo_n "checking if $CC supports -Wsuggest-attribute=const... " >&6; } +if ${mu_cv_C_flag_suggest_attribute_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_suggest_attribute_const=yes +else + mu_cv_C_flag_suggest_attribute_const=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_suggest_attribute_const" >&5 +$as_echo "$mu_cv_C_flag_suggest_attribute_const" >&6; } +if test "$mu_cv_C_flag_suggest_attribute_const" = yes; then : + CFLAGS="$acm_save_CFLAGS -Wsuggest-attribute=const" +else + CFLAGS="$acm_save_CFLAGS" +fi + +acm_save_CFLAGS="$CFLAGS" +CFLAGS="$CFLAGS$ACM_C_WARNINGFAIL -Wsuggest-attribute=pure" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=pure" >&5 +$as_echo_n "checking if $CC supports -Wsuggest-attribute=pure... " >&6; } +if ${mu_cv_C_flag_suggest_attribute_pure+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_suggest_attribute_pure=yes +else + mu_cv_C_flag_suggest_attribute_pure=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_suggest_attribute_pure" >&5 +$as_echo "$mu_cv_C_flag_suggest_attribute_pure" >&6; } +if test "$mu_cv_C_flag_suggest_attribute_pure" = yes; then : + CFLAGS="$acm_save_CFLAGS -Wsuggest-attribute=pure" +else + CFLAGS="$acm_save_CFLAGS" +fi + +acm_save_CFLAGS="$CFLAGS" +CFLAGS="$CFLAGS$ACM_C_WARNINGFAIL -Wsuggest-attribute=noreturn" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=noreturn" >&5 +$as_echo_n "checking if $CC supports -Wsuggest-attribute=noreturn... " >&6; } +if ${mu_cv_C_flag_suggest_attribute_noreturn+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + mu_cv_C_flag_suggest_attribute_noreturn=yes +else + mu_cv_C_flag_suggest_attribute_noreturn=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_suggest_attribute_noreturn" >&5 +$as_echo "$mu_cv_C_flag_suggest_attribute_noreturn" >&6; } +if test "$mu_cv_C_flag_suggest_attribute_noreturn" = yes; then : + CFLAGS="$acm_save_CFLAGS -Wsuggest-attribute=noreturn" +else + CFLAGS="$acm_save_CFLAGS" +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + + +acm_save_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS$ACM_CXX_WARNINGFAIL -Wsuggest-attribute=format" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=format" >&5 +$as_echo_n "checking if $CXX supports -Wsuggest-attribute=format... " >&6; } +if ${mu_cv_CXX_flag_suggest_attribute_format+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_suggest_attribute_format=yes +else + mu_cv_CXX_flag_suggest_attribute_format=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_suggest_attribute_format" >&5 +$as_echo "$mu_cv_CXX_flag_suggest_attribute_format" >&6; } +if test "$mu_cv_CXX_flag_suggest_attribute_format" = yes; then : + CXXFLAGS="$acm_save_CXXFLAGS -Wsuggest-attribute=format" +else + CXXFLAGS="$acm_save_CXXFLAGS" +fi + +acm_save_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS$ACM_CXX_WARNINGFAIL -Wsuggest-attribute=const" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=const" >&5 +$as_echo_n "checking if $CXX supports -Wsuggest-attribute=const... " >&6; } +if ${mu_cv_CXX_flag_suggest_attribute_const+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_suggest_attribute_const=yes +else + mu_cv_CXX_flag_suggest_attribute_const=no -case $host in +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - *-*-linux* ) +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_suggest_attribute_const" >&5 +$as_echo "$mu_cv_CXX_flag_suggest_attribute_const" >&6; } +if test "$mu_cv_CXX_flag_suggest_attribute_const" = yes; then : + CXXFLAGS="$acm_save_CXXFLAGS -Wsuggest-attribute=const" +else + CXXFLAGS="$acm_save_CXXFLAGS" +fi - MAKEUP_HOST_ARCH="ELF" +acm_save_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS$ACM_CXX_WARNINGFAIL -Wsuggest-attribute=pure" - makeup_build_platform="linux" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=pure" >&5 +$as_echo_n "checking if $CXX supports -Wsuggest-attribute=pure... " >&6; } +if ${mu_cv_CXX_flag_suggest_attribute_pure+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ - DSOEXT=".so" +int +main () +{ + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_suggest_attribute_pure=yes +else + mu_cv_CXX_flag_suggest_attribute_pure=no - if test "$ac_cv_enable_shared" = yes; then : - PICFLAGS="-fPIC" fi - ;; - - *-*-*bsd* | *-*-darwin* ) - - MAKEUP_HOST_ARCH="ELF" - makeup_build_platform="bsd" - - DSOEXT=".so" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test "$ac_cv_enable_shared" = yes; then : - PICFLAGS="-fPIC" fi - ;; - - *-*-cygwin* | *-*-mingw32* ) - - MAKEUP_HOST_ARCH="PE" - makeup_build_platform="msw" - - DSOEXT=".dll" - - if test "$ac_cv_enable_shared" = yes; then : - PICFLAGS="-D_DLL=1 -D_WINDLL=1" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_suggest_attribute_pure" >&5 +$as_echo "$mu_cv_CXX_flag_suggest_attribute_pure" >&6; } +if test "$mu_cv_CXX_flag_suggest_attribute_pure" = yes; then : + CXXFLAGS="$acm_save_CXXFLAGS -Wsuggest-attribute=pure" +else + CXXFLAGS="$acm_save_CXXFLAGS" fi +acm_save_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS$ACM_CXX_WARNINGFAIL -Wsuggest-attribute=noreturn" - WINRCFLAGS="--include-dir /usr/$host_alias/include" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=noreturn" >&5 +$as_echo_n "checking if $CXX supports -Wsuggest-attribute=noreturn... " >&6; } +if ${mu_cv_CXX_flag_suggest_attribute_noreturn+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ - if test -n "$ac_cv_with_wx_build_dir"; then : +int +main () +{ - WINRCFLAGS="$WINRCFLAGS --include-dir $ac_cv_with_wx_build_dir/../include" + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + mu_cv_CXX_flag_suggest_attribute_noreturn=yes +else + mu_cv_CXX_flag_suggest_attribute_noreturn=no fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - WINRCFLAGS="$WINRCFLAGS --define __WIN32__ --define __WIN95__ --define __GNUWIN32__" - - RC_SEP= - ;; - * ) - as_fn_error $? "Unknown host type. Stopping." 
"$LINENO" 5 -esac - -if test "$ac_cv_enable_debug" = yes; then : - makeup_build_flavour=d +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_suggest_attribute_noreturn" >&5 +$as_echo "$mu_cv_CXX_flag_suggest_attribute_noreturn" >&6; } +if test "$mu_cv_CXX_flag_suggest_attribute_noreturn" = yes; then : + CXXFLAGS="$acm_save_CXXFLAGS -Wsuggest-attribute=noreturn" else - makeup_build_flavour=r + CXXFLAGS="$acm_save_CXXFLAGS" fi -if test "$ac_cv_enable_shared" = yes; then : - - MAKEUP_DEFAULT_LINKAGE="shared" -else +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - MAKEUP_DEFAULT_LINKAGE="static" -fi -cc_warnings=" -Wall" -cc_fail_on_warn=" -Werror" -cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wno-format-nonliteral -Wfloat-equal" +fi -cc_optimise=" -O2" -cc_profile=" -pg" -cc_debug=" -g" -cc_pipe=" -pipe" -c_extra_warnings=" -Wstrict-prototypes -Wmissing-prototypes" +PTHREAD_CPPFLAGS="-pthread" +PTHREAD_LDFLAGS="-pthread" -cxx_extra_warnings=" -Woverloaded-virtual" +save_CPPFLAGS=$CPPFLAGS +CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" -cc_flags= -c_flags= -cxx_flags= -if test "$ac_cv_enable_pipe" = yes; then : - cc_flags="$cc_flags$cc_pipe" -fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is defined by the compiler" >&5 +$as_echo_n "checking if _REENTRANT is defined by the compiler... " >&6; } +if ${ac_cv_have_reentrant+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ -if test "$ac_cv_enable_optimisation" = yes; then : - cc_flags="$cc_flags$cc_optimise" -fi +#ifndef _REENTRANT +#error "_REENTRANT was not defined" +#endif -if test "$ac_cv_enable_debug" = yes; then : - cc_flags="$cc_flags$cc_debug" -fi +int +main () +{ -if test "$ac_cv_enable_profiling" = yes; then : + ; + return 0; +} - cc_flags="$cc_flags$cc_profile" - LDFLAGS="$LDFLAGS$cc_profile" +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_cv_have_reentrant=yes +else + ac_cv_have_reentrant=no fi +rm -f conftest.err conftest.i conftest.$ac_ext -cc_flags="$cc_flags$cc_warnings" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_reentrant" >&5 +$as_echo "$ac_cv_have_reentrant" >&6; } -if test "$ac_cv_enable_extra_warnings" = yes; then : +CPPFLAGS=$save_CPPFLAGS - cc_flags="$cc_flags$cc_extra_warnings" - c_flags="$c_flags$c_extra_warnings" - cxx_flags="$cxx_flags$cxx_extra_warnings" +if test "$ac_cv_have_reentrant" != "yes"; then : -fi + PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" -if test "$ac_cv_enable_fail_on_warning" = yes; then : - cc_flags="$cc_flags$cc_fail_on_warn" fi -CFLAGS=${CFLAGS:-$cc_flags$c_flags} -CXXFLAGS=${CXXFLAGS:-$cc_flags$cxx_flags} - -# add 's' here and omit ranlib from the build step -ARFLAGS=rDvs - if test "$ac_cv_enable_bison_deprecated_warnings" = no; then : @@ -8138,6 +8529,10 @@ ;; *-*-cygwin* | *-*-mingw32* ) + if test -z "$SEEDD_CONTROL_SOCKET"; then : + SEEDD_CONTROL_SOCKET=tcp:localhost:56789 +fi + ac_cv_env_winver=0x0600 ac_cv_env__win32_winnt=0x0600 @@ -8160,20 +8555,31 @@ ;; *-*-openbsd* ) + if test -z "$SEEDD_CONTROL_SOCKET"; then : + SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket +fi -$as_echo "#define HAVE_BROKEN_STDIO_LOCKING 1" >>confdefs.h + if test -z "$THREAD_STACK_SIZE"; then : + THREAD_STACK_SIZE=8192 +fi - THREAD_STACK_SIZE=8192 +$as_echo "#define 
HAVE_BROKEN_STDIO_LOCKING 1" >>confdefs.h + ;; *-*-freebsd* ) - THREAD_STACK_SIZE=8192 + if test -z "$SEEDD_CONTROL_SOCKET"; then : + SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket +fi + + if test -z "$THREAD_STACK_SIZE"; then : + THREAD_STACK_SIZE=8192 +fi save_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS -fno-guess-branch-probability" - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -fno-guess-branch-probability" >&5 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -fno-guess-branch-probability" >&5 $as_echo_n "checking if $CXX needs -fno-guess-branch-probability... " >&6; } if ${bb_cv_flag_guess_branch_probability+:} false; then : $as_echo_n "(cached) " >&6 @@ -8293,11 +8699,33 @@ ;; *-*-darwin* ) - THREAD_STACK_SIZE=8192 + if test -z "$SEEDD_CONTROL_SOCKET"; then : + SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket +fi + + if test -z "$THREAD_STACK_SIZE"; then : + THREAD_STACK_SIZE=8192 +fi ;; esac +if test -z "$SEEDD_CONTROL_SOCKET"; then : + SEEDD_CONTROL_SOCKET=/run/bit-babbler/seedd.socket +fi + + +if test -n "$SEEDD_CONTROL_SOCKET"; then : + + +cat >>confdefs.h <<_ACEOF +#define SEEDD_CONTROL_SOCKET "$SEEDD_CONTROL_SOCKET" +_ACEOF + + +fi + + save_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS -Wno-gnu-designator" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -Wno-gnu-designator" >&5 @@ -9343,8 +9771,16 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: Configured bit-babbler $PACKAGE_VERSION" >&5 $as_echo "$as_me: Configured bit-babbler $PACKAGE_VERSION" >&6;} -{ $as_echo "$as_me:${as_lineno-$LINENO}: with udev: $ac_cv_with_udev" >&5 -$as_echo "$as_me: with udev: $ac_cv_with_udev" >&6;} +{ $as_echo "$as_me:${as_lineno-$LINENO}: with udev: $ac_cv_with_udev" >&5 +$as_echo "$as_me: with udev: $ac_cv_with_udev" >&6;} +{ $as_echo "$as_me:${as_lineno-$LINENO}: SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET" >&5 +$as_echo "$as_me: SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET" >&6;} +if test -n "$THREAD_STACK_SIZE"; then : + +{ $as_echo "$as_me:${as_lineno-$LINENO}: THREAD_STACK_SIZE: $THREAD_STACK_SIZE" >&5 +$as_echo "$as_me: THREAD_STACK_SIZE: $THREAD_STACK_SIZE" >&6;} + +fi case $host in @@ -9373,6 +9809,8 @@ ;; esac +ac_config_files="$ac_config_files munin/bit_babbler" + @@ -9917,7 +10355,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by bit-babbler $as_me 0.7, which was +This file was extended by bit-babbler $as_me 0.8, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -9983,7 +10421,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -bit-babbler config.status 0.7 +bit-babbler config.status 0.8 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" @@ -10110,9 +10548,9 @@ - makeup_version="0.31" + makeup_version="0.32" package_name="bit-babbler" - package_version="0.7" + package_version="0.8" __package_config_dir="" __package_config_public="setup.h" @@ -10141,6 +10579,7 @@ do case $ac_config_target in "po-directories") CONFIG_COMMANDS="$CONFIG_COMMANDS po-directories" ;; + "munin/bit_babbler") CONFIG_FILES="$CONFIG_FILES munin/bit_babbler" ;; "Makefile") CONFIG_COMMANDS="$CONFIG_COMMANDS Makefile" ;; "Makefile.acsubst") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst:Makeup/gmake-fragments/makefile.acsubst" ;; "include/private_setup.h") CONFIG_HEADERS="$CONFIG_HEADERS include/private_setup.h:private_setup.h.in" ;; @@ -10821,11 +11260,12 @@ ;; esac done ;; + "munin/bit_babbler":F) chmod +x munin/bit_babbler ;; "Makefile":C) cat > Makefile < +# Copyright 2003 - 2017, Ron # # This file is distributed under the terms of the GNU GPL version 2. # @@ -10860,7 +11300,7 @@ cat > $_TEMPFILE < + * Copyright 2003 - 2017, Ron * * This file is distributed under the terms of the GNU GPL version 2. * @@ -10957,16 +11397,27 @@ #endif -// Compiler version test. +// Compiler version tests. // // This macro will return false if the version of gcc in use // is earlier than the specified major, minor limit, or if gcc // is not being used. Otherwise it will evaluate to be true. -#define EM_COMPILER_GCC( major, minor ) \\ - ( defined(__GNUC__) && defined(__GNUC_MINOR__) \\ - && ( ( __GNUC__ > (major) ) \\ - || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) ) +// This will also be true for the clang compiler, for whatever +// GCC version it is pretending to be compatible with. +#if defined(__GNUC__) && defined(__GNUC_MINOR__) + #define EM_COMPILER_GCC( major, minor ) ( ( __GNUC__ > (major) ) \\ + || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) +#else + #define EM_COMPILER_GCC( major, minor ) 0 +#endif +// As above, except for the clang compiler instead. +#if defined(__clang_major__) && defined(__clang_minor__) + #define EM_COMPILER_CLANG( major, minor ) ( ( __clang_major__ > (major) ) \\ + || ( __clang_major__ == (major) && __clang_minor__ >= (minor) ) ) +#else + #define EM_COMPILER_CLANG( major, minor ) 0 +#endif #endif // ${_GUARD} diff -Nru bit-babbler-0.7/configure.ac bit-babbler-0.8/configure.ac --- bit-babbler-0.7/configure.ac 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/configure.ac 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,6 @@ -# Makeup 0.31 generated configure.ac. +# Makeup 0.32 generated configure.ac. # Do not edit this file directly, your changes will be lost. -# Copyright (C) 2003 - 2016, Ron +# Copyright (C) 2003 - 2018, Ron # # This file is distributed under the terms of the GNU GPL version 2. # @@ -12,10 +12,10 @@ # and make it clear that your modifications are licenced strictly # according to the GPL 2 or a compatible licence. 
-AC_INIT([bit-babbler], [0.7], [ron@debian.org]) -AC_COPYRIGHT([Copyright (C) 2003 - 2008, Ron ]) +AC_INIT([bit-babbler], [0.8], [ron@debian.org]) +AC_COPYRIGHT([Copyright (C) 2003 - 2018, Ron ]) AC_PREREQ([2.61]) -AC_REVISION([generated by Makeup 0.31]) +AC_REVISION([generated by Makeup 0.32]) AC_CONFIG_SRCDIR([Makeup/Makeup.conf]) @@ -33,10 +33,13 @@ AC_CANONICAL_BUILD AC_CANONICAL_HOST +# Select the default language standard to use. +CXX_STANDARD="-std=gnu++98" + dnl ----- Begin: configure.stdtools ----- dnl Makeup configure boilerplate. dnl -dnl Copyright 2003 - 2016, Ron +dnl Copyright 2003 - 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl @@ -55,13 +58,94 @@ CXXFLAGS=${CXXFLAGS-} +dnl These are separately precious because overriding {C,CXX}FLAGS should not +dnl normally mask the C/C++ standard that a project is built with, and that +dnl might not be an immediately obvious consequence of setting them explictly. +dnl If you really want to override that, do it with these (or by changing the +dnl PACKAGE_{C,XX}STD set for the project), which likewise will also preserve +dnl whatever other compiler flags would normally be used. +AC_ARG_VAR([C_STANDARD], [flags to set the compiler C standard to use]) +AC_ARG_VAR([CXX_STANDARD], [flags to set the compiler C++ standard to use]) + + +dnl Not all platforms have GCC as their default compiler anymore, even if it is +dnl still available by default. Autoconf still prefers to use GCC by default +dnl in the AC_PROG_{CC,CXX} tests though. These variables let the search order +dnl be explicitly specified by the user, and let us automatically tweak it for +dnl different platforms. +AC_ARG_VAR([CC_SEARCH], [space separated list of which C compiler to prefer]) +AC_ARG_VAR([CXX_SEARCH], [space separated list of which C++ compiler to prefer]) + +# Oddly enough, the most preferred compiler is a platform specific thing, not a +# universal truth. Who could have guessed ... + +dnl Keeping this list current with the changing Winds of Whim could become a +dnl rather tedious and fragile thing, so it's tempting to default to checking +dnl for cc and c++ first everywhere, on the assumption that all modern systems +dnl now have that as an alias to their actually preferred toolchains, but that +dnl has the downside of making it less obvious exactly which compiler is being +dnl used, and making it even more fragile if some user has changed it from what +dnl the normal platform default would otherwise be ... So let's see how this +dnl goes for a while. At present the platform needing this most is OpenBSD, +dnl since it still ships an ancient "last of the GPLv2" gcc in its base set, +dnl but actually has clang as its default and preferred compiler. +case $host in + *-*-openbsd* ) + dnl OpenBSD (as of 6.2) still has GCC 4.2.1 installed in its base set, + dnl but "defaults" to clang (which is what /usr/bin/cc points to), so + dnl test for a working clang before gcc there. + AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="clang gcc cc"]) + AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="clang++ g++ c++"]) + ;; + + * ) + dnl By default, do what autoconf would otherwise do and prefer GCC, + dnl except our second choice is clang (which it entirely ignores), + dnl and we don't bother looking for the obscure C++ compilers which + dnl it would check for if it doesn't find g++ or c++. When someone + dnl proves they want them, and that they can compile our code, then + dnl we can revise this list to add them. 
+ dnl + dnl Ideally, we'd have defaulted to calling AC_PROG_{CC,CXX} with an + dnl empty argument, and just let it do its own default thing, but that + dnl macro is too broken to enable that, it checks if the argument is + dnl empty during the m4 pass, so it considers an empty variable to be + dnl an explicit list (and then fails at runtime with no compilers to + dnl check) - and we can't AS_IF it and call it either with or without + dnl arguments at runtime, because there are tests in there which will + dnl only expand once, and so everything falls apart when they are only + dnl expanded in the dead branch ... The assumption that it will only + dnl ever appear once in one code path goes deep there. + AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="gcc clang cc"]) + AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="g++ clang++ c++"]) + ;; +esac + + # Check standard tools. -AC_PROG_CC +AC_PROG_CC([$CC_SEARCH]) AC_PROG_CPP -AC_PROG_CXX +AC_PROG_CXX([$CXX_SEARCH]) AC_PROG_CXXCPP +dnl If we explicitly set the C/C++ standard to use, then ensure that is passed +dnl when the preprocessor is run during the tests that follow. This is a bit +dnl sketchy, because really this ought to be done as part of testing for how to +dnl run the preprocessor above - and there are no separate variables for the +dnl preprocessor flags for C and C++, the autoconf tests just use CPPFLAGS for +dnl both, which is a bit difficult when we want to specify a C or C++ standard +dnl to use in mixed code. If we don't do this though, then we can see dodgy or +dnl misleading test results for things like AC_CHECK_HEADERS which runs both +dnl the compiler and preprocessor as separate tests. If CFLAGS or CXXFLAGS set +dnl a standard to use and the preprocessor flags do not, then the results could +dnl be conflicting when things which do vary according to the standard that is +dnl being used are involved. Fortunately, CPP and CXXCPP generally aren't used +dnl very often outside of the feature tests here, and if there is a problem it +dnl will probably shake out fairly early in the first test which does use it. +AS_IF([test -n "$C_STANDARD"],[CPP="$CPP $C_STANDARD"]) +AS_IF([test -n "$CXX_STANDARD"],[CXXCPP="$CXXCPP $CXX_STANDARD"]) + AC_PROG_LEX dnl AC_PROG_YACC dnl Do this instead since we want real bison usually, @@ -80,31 +164,6 @@ AC_CHECK_PROGS([GENHTML],[genhtml],[:]) -PTHREAD_CPPFLAGS="-pthread" -PTHREAD_LDFLAGS="-pthread" - -save_CPPFLAGS=$CPPFLAGS -CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" - -AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [ac_cv_have_reentrant], - [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ -#ifndef _REENTRANT -#error "_REENTRANT was not defined" -#endif - ]]) - ], - [ac_cv_have_reentrant=yes], - [ac_cv_have_reentrant=no] - )] -) - -CPPFLAGS=$save_CPPFLAGS - -AS_IF([test "$ac_cv_have_reentrant" != "yes"],[ - PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" -]) - - # Check standard args. AC_ARG_ENABLE([pipe], @@ -270,10 +329,7 @@ dnl These may all go in platform dependent conditionals one day. -dnl Note that we disable Wformat-nonliteral from Wformat=2 because -dnl it barks about the Emerald::format_str<> template that we -dnl use to output generic types in some places. -dnl And we no longer use -Wconversion. It was mainly designed as +dnl We no longer use -Wconversion. It was mainly designed as dnl a filter for porting old C code where parameters may be type dnl promoted in the absence of a new style prototype, and should dnl not be used to check new code. 
False positives in the xlocale @@ -281,7 +337,7 @@ dnl make it officially more trouble than it is presently worth. cc_warnings=" -Wall" cc_fail_on_warn=" -Werror" -cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wno-format-nonliteral -Wfloat-equal" +cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wfloat-equal" cc_optimise=" -O2" cc_profile=" -pg" @@ -324,10 +380,49 @@ CFLAGS=${CFLAGS:-$cc_flags$c_flags} CXXFLAGS=${CXXFLAGS:-$cc_flags$cxx_flags} +AS_IF([test -n "$C_STANDARD"],[CFLAGS="$C_STANDARD $CFLAGS"]) +AS_IF([test -n "$CXX_STANDARD"],[CXXFLAGS="$CXX_STANDARD $CXXFLAGS"]) + # add 's' here and omit ranlib from the build step ARFLAGS=rDvs +dnl We need to test if these extra warnings are actually supported by the +dnl toolchain in use, we can't safely assume that it does with this lot. +dnl The -Wsuggest-attribute options are currently GCC specific. +AS_IF([test "$ac_cv_enable_extra_warnings" = yes],[ + ACM_ADD_COMPILER_WARNING([C,CXX],[suggest-attribute=format, + suggest-attribute=const, + suggest-attribute=pure, + suggest-attribute=noreturn]) +]) + + +PTHREAD_CPPFLAGS="-pthread" +PTHREAD_LDFLAGS="-pthread" + +save_CPPFLAGS=$CPPFLAGS +CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" + +AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [ac_cv_have_reentrant], + [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ +#ifndef _REENTRANT +#error "_REENTRANT was not defined" +#endif + ]]) + ], + [ac_cv_have_reentrant=yes], + [ac_cv_have_reentrant=no] + )] +) + +CPPFLAGS=$save_CPPFLAGS + +AS_IF([test "$ac_cv_have_reentrant" != "yes"],[ + PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" +]) + + dnl bison3 complains loudly about a bunch of constructs that must still be used dnl if compatibility with bison2 is required, and appears to give us no clean dnl way to deal with that at all. We can tell bison3 not to bark by passing it @@ -524,7 +619,7 @@ dnl ----- Begin: ./Makeup/config/configure.bit-babbler ----- dnl Makeup extra configuration for bit-babbler. dnl -dnl Copyright 2003 - 2017, Ron Lee. +dnl Copyright 2003 - 2018, Ron Lee. dnl AC_LANG_PUSH([C++]) @@ -536,6 +631,10 @@ ;; *-*-cygwin* | *-*-mingw32* ) + dnl We don't have unix domain sockets on windows, so default to TCP there. + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=tcp:localhost:56789]) + dnl We need at least 0x0600 to get AI_ADDRCONFIG for getaddrinfo ac_cv_env_winver=0x0600 ac_cv_env__win32_winnt=0x0600 @@ -554,19 +653,27 @@ ;; *-*-openbsd* ) - AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], - [Workaround OpenBSD _thread_flockfile cancellation bug]) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) dnl The default pthread stack size on OpenBSD 6.1 is 512kB, so fix that. - THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) + + AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], + [Workaround OpenBSD _thread_flockfile cancellation bug]) ;; *-*-freebsd* ) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) + dnl The default pthread stack size on FreeBSD 11 is 2MB, so fix that. dnl So far we haven't actually had this smash the stack there with dnl the default size (unlike OpenBSD, MacOS and Windows), but let's dnl not wait until we do, just use the same size as everywhere else. 
- THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) dnl On FreeBSD 11, both gcc6 and gcc7 will miscompile code when the dnl -fguess-branch-probability optimisation is enabled (which it is @@ -643,13 +750,35 @@ ;; *-*-darwin* ) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) + dnl The default pthread stack size on MacOS is only 512kB, and we expect to dnl need more than that, so bring it into line with the normal Linux default. - THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; esac +dnl /var could be a remote mount which isn't available at early boot when seedd +dnl is first started, but /run is supposed to be ready before any ordinary early +dnl boot process even if it is a separate mount like a tmpfs, so default to it +dnl unless we know it's not expected to be supported. FHS 3.0 allows /var/run +dnl to be an alias to /run, and that is what most (but not all) Linux distros +dnl currently do. The BSDs (aside from Debian's kFreeBSD port) aren't riding +dnl this train yet though, so we still use /var/run there instead of rudely +dnl creating a new directory in the root of people's systems. +AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/run/bit-babbler/seedd.socket]) + +AC_ARG_VAR([SEEDD_CONTROL_SOCKET], [Set the default to use for the seedd control socket]) +AS_IF([test -n "$SEEDD_CONTROL_SOCKET"],[ + AC_DEFINE_UNQUOTED([SEEDD_CONTROL_SOCKET],["$SEEDD_CONTROL_SOCKET"], + [Set the default to use for the seedd control socket]) + ]) + + dnl Clang bitches about "GNU old-style field designator extension" in C++ code dnl even though the C99 designated initialiser style isn't supported by C++, dnl even as a GNU extension. We use the old initialiser style though, so hush @@ -980,7 +1109,11 @@ AC_MSG_NOTICE([Configured bit-babbler $PACKAGE_VERSION]) -AC_MSG_NOTICE([ with udev: $ac_cv_with_udev]) +AC_MSG_NOTICE([ with udev: $ac_cv_with_udev]) +AC_MSG_NOTICE([ SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET]) +AS_IF([test -n "$THREAD_STACK_SIZE"],[ +AC_MSG_NOTICE([ THREAD_STACK_SIZE: $THREAD_STACK_SIZE]) +]) case $host in @@ -1002,6 +1135,7 @@ ;; esac +AC_CONFIG_FILES([munin/bit_babbler],[chmod +x munin/bit_babbler]) dnl ------- End: ./Makeup/config/configure.bit-babbler ----- AC_ARG_VAR([MAKEUP_PLATFORM_HEADER],[platform specific config header]) @@ -1018,9 +1152,9 @@ ACM_CONFIG_MAKEFILE([Makeup/gmake-fragments], [ - makeup_version="0.31" + makeup_version="0.32" package_name="bit-babbler" - package_version="0.7" + package_version="0.8" __package_config_dir="" __package_config_public="setup.h" ]) diff -Nru bit-babbler-0.7/debian/bit-babbler.NEWS bit-babbler-0.8/debian/bit-babbler.NEWS --- bit-babbler-0.7/debian/bit-babbler.NEWS 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.NEWS 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,32 @@ +bit-babbler (0.8) unstable; urgency=medium + + The 0.8 release changes the way which seedd(1) is normally configured when + started as a system daemon. Previously, configuration options could be set + in /etc/default/seedd, but systemd doesn't support handling those in the way + which we did so in the SysV init script. So now, when seedd is started from + either the seedd.service systemd unit or the SysV init script, it will be + configured using the options set in /etc/bit-babbler/seedd.conf instead. 
+ + If you have customised the configuration in /etc/default/seedd then it will + automatically be preserved during upgrade by generating an equivalent custom + seedd.conf for you. The old configuration file content will be retained in + /etc/default/seedd.dpkg-old, in case there are other things in there (like + comments) that you do wish to keep a note of somewhere, but it can be safely + removed when there is nothing in it which you still need. Nothing in this + package will use anything from /etc/default after this. + + For reference, the packaged default seedd.conf will still be installed (in + the same way as if you had selected "keep your currently-installed version" + at the dpkg conffile prompt) as /etc/bit-babbler/seedd.conf.dpkg-new for you + to inspect. You can safely modify the generated seedd.conf however you wish + after this, as it will not be (re)generated again on future updates. Future + updates will be handled with the normal dpkg mechanism for conffiles when + there are any changes in the packaged and locally installed versions which + need to be resolved. + + If you have not modified /etc/default/seedd, then it will simply be removed + and the equivalent default seedd.conf will be installed, making this change + (even more) completely transparent to you. + + -- Ron Lee Thu, 08 Feb 2018 10:26:52 +1030 + diff -Nru bit-babbler-0.7/debian/bit-babbler.postinst bit-babbler-0.8/debian/bit-babbler.postinst --- bit-babbler-0.7/debian/bit-babbler.postinst 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.postinst 2018-02-07 23:56:52.000000000 +0000 @@ -15,14 +15,82 @@ # the debian-policy package +migrate_seedd_conf() +{ + local oldconf='/etc/default/seedd' + local newconf='/etc/bit-babbler/seedd.conf' + local saveconf="${oldconf}.dpkg-old" + + # Remove the old config if it was unmodified + rm -f "${oldconf}.dpkg-remove" + + # Otherwise, if it's still there and we still own it, let's convert it. + [ -e "$oldconf" ] || return 0 + dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 + + # Preserve the new config file that was shipped with the package, in the + # same way as if the user had chosen "keep my local changes" at the dpkg + # conffile update prompt. Except for this we keep them without prompting. + [ ! -e "$newconf" ] || mv -f "$newconf" "${newconf}.dpkg-new" + + # Simulate how the old config was converted to command line options in the + # init script, then generate a new format config file using those options. + # Pretend we were invoked by systemd for that, since the new init script + # will itself add the needed options which that would omit. + ( + SEEDD_ARGS="-k" + . "$oldconf" + [ -z "$CONTROL_GROUP" ] || SEEDD_ARGS="$SEEDD_ARGS --socket-group $CONTROL_GROUP" + + export NOTIFY_SOCKET=@dummy + + cat > "$newconf" <<-EOF + # These options were automatically migrated from $oldconf + # during upgrade to the bit-babbler 0.8 (or later) release. + # + # A copy of the old file has been preserved in $saveconf + # which can safely be deleted now after confirming that there is nothing + # remaining in it which you do wish to retain a copy of. + # + # This file can safely be edited to customise the seedd configuration, it will + # not be automatically (re)generated again after the initial conversion of the + # old configuration file has created it. + + EOF + + if /usr/bin/seedd --gen-conf $SEEDD_ARGS >> "$newconf" 2>/dev/null; then + # Preserve a copy of their old config. 
We migrated anything which was + # actively being used by the old init script, but there may be comments + # or commented out configurations, or other things which it would be a + # bit rude of us to just completely delete without asking. + cat - "$oldconf" > "$saveconf" <<-EOF + # NOTE: This file contains the last content from $oldconf + # prior to it being converted into the new $newconf + # + # It may safely be deleted if there is nothing else in it which you wish + # to retain. The seedd configuration uses options from the new file now + # and there is nothing which still uses anything from the old one at all. + + EOF + + rm -f "$oldconf" + echo "Automatically migrated $oldconf configuration to $newconf" + echo "The previous configuration file was saved to $saveconf" + fi + ) +} + case "$1" in configure) addgroup --quiet --system bit-babbler sysctl -q -p /etc/sysctl.d/bit-babbler-sysctl.conf || true + + # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous + # daemon configuration options which were set in /etc/default/seedd. + dpkg --compare-versions -- "$2" ge-nl '0.8~' || migrate_seedd_conf ;; abort-upgrade|abort-remove|abort-deconfigure) - exit 0 ;; *) @@ -34,3 +102,5 @@ #DEBHELPER# exit 0 + +# vi:sts=4:sw=4:noet diff -Nru bit-babbler-0.7/debian/bit-babbler.postrm bit-babbler-0.8/debian/bit-babbler.postrm --- bit-babbler-0.7/debian/bit-babbler.postrm 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.postrm 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,52 @@ +#! /bin/sh + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' overwrit>r> +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +revert_seedd_conf() +{ + local oldconf='/etc/default/seedd' + + # Check that the old file hasn't already been removed, and that it hasn't + # been usurped by some other package (which is unlikely, but would be Bad + # for what we're going to do next). + [ -e "${oldconf}.dpkg-remove" ] || return 0 + dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 + + # Put things back to how they started before the upgrade. + mv "${oldconf}.dpkg-remove" "$oldconf" +} + +case "$1" in + abort-install|abort-upgrade) + # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous + # daemon configuration options which were set in /etc/default/seedd. + dpkg --compare-versions -- "$2" ge-nl '0.8~' || revert_seedd_conf + ;; + + remove|purge|upgrade|failed-upgrade|disappear) + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +#DEBHELPER# + +exit 0 + +# vi:sts=4:sw=4:noet diff -Nru bit-babbler-0.7/debian/bit-babbler.preinst bit-babbler-0.8/debian/bit-babbler.preinst --- bit-babbler-0.7/debian/bit-babbler.preinst 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.preinst 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,53 @@ +#! /bin/sh + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +check_seedd_conf() +{ + local oldconf='/etc/default/seedd' + + # Check that the old file hasn't already been removed, and that it hasn't + # been usurped by some other package (which is unlikely, but would be Bad + # for what we're going to do next). 
+ [ -e "$oldconf" ] || return 0 + dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 + + local sys_sum=$(md5sum "$oldconf" | sed -e 's/ .*//') + local pkg_sum=$(dpkg-query -W -f='${Conffiles}' bit-babbler | sed -n -e "\'^ $oldconf ' { s/ obsolete$//; s/.* //; p }") + + # If it's unmodified, move it aside for deletion if the upgrade succeeds. + [ "$sys_sum" != "$pkg_sum" ] || mv -f "$oldconf" "${oldconf}.dpkg-remove" + + # Otherwise, we'll migrate it to the new form in postinst. +} + +case "$1" in + install|upgrade) + # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous + # daemon configuration options which were set in /etc/default/seedd. + dpkg --compare-versions -- "$2" ge-nl '0.8~' || check_seedd_conf + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +#DEBHELPER# + +exit 0 + +# vi:sts=4:sw=4:noet diff -Nru bit-babbler-0.7/debian/bit-babbler.seedd.default bit-babbler-0.8/debian/bit-babbler.seedd.default --- bit-babbler-0.7/debian/bit-babbler.seedd.default 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.seedd.default 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -# Defaults for the BitBabbler seedd initscript, sourced by /etc/init.d/seedd -# -# This is a POSIX shell fragment - -# Additional command line options to use when seedd is started. -#SEEDD_ARGS="-k" - -# An optional group name, members of which will be permitted to connect to the -# seedd control socket. If set here do not set --socket-group in SEEDD_ARGS. -CONTROL_GROUP="adm" - diff -Nru bit-babbler-0.7/debian/bit-babbler.seedd.init bit-babbler-0.8/debian/bit-babbler.seedd.init --- bit-babbler-0.7/debian/bit-babbler.seedd.init 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/bit-babbler.seedd.init 2018-02-07 23:56:52.000000000 +0000 @@ -2,9 +2,9 @@ # ### BEGIN INIT INFO # Provides: seedd -# Required-Start: $remote_fs $local_fs $syslog -# Required-Stop: $remote_fs $local_fs $syslog -# Default-Start: 2 3 4 5 +# Required-Start: $local_fs $syslog +# Required-Stop: $local_fs $syslog +# Default-Start: S # Default-Stop: 0 1 6 # Short-Description: BitBabbler entropy source daemon # Description: @@ -16,19 +16,12 @@ NAME=seedd DESC="BitBabbler entropy source daemon" DAEMON=/usr/bin/seedd - -# Default options. Do not edit them here, they can be overridden by setting -# alternative values in the /etc/default/seedd file. -SEEDD_ARGS="-k" +SEEDD_CONFIG=/etc/bit-babbler/seedd.conf PATH=/sbin:/bin:/usr/sbin:/usr/bin [ -x $DAEMON ] || exit 0 -[ ! -r /etc/default/$NAME ] || . /etc/default/$NAME - -[ -z "$CONTROL_GROUP" ] || SEEDD_ARGS="$SEEDD_ARGS --socket-group $CONTROL_GROUP" - . /lib/init/vars.sh . /lib/lsb/init-functions @@ -42,7 +35,7 @@ start-stop-daemon --start --quiet --exec $DAEMON --test > /dev/null \ || return 1 - start-stop-daemon --start --quiet --exec $DAEMON -- -d $SEEDD_ARGS \ + start-stop-daemon --start --quiet --exec $DAEMON -- -d -C $SEEDD_CONFIG \ || return 2 } @@ -82,13 +75,13 @@ esac ;; - reload|force-reload) + reload) log_daemon_msg "Reloading $DESC" "$NAME" do_reload log_end_msg $? ;; - restart) + force-reload|restart) log_daemon_msg "Restarting $DESC" "$NAME" do_stop case "$?" 
in diff -Nru bit-babbler-0.7/debian/changelog bit-babbler-0.8/debian/changelog --- bit-babbler-0.7/debian/changelog 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/changelog 2018-02-07 23:56:52.000000000 +0000 @@ -1,3 +1,81 @@ +bit-babbler (0.8) unstable; urgency=medium + + * Support hotplugging devices into libvirt guest domains which have names + containing characters that are not valid as part of a shell variable name. + Another reminder that the important part of keeping things as simple as + possible is always the "as possible" bit. + + * Support reading seedd(1) options from a configuration file. The original + design plan explicitly avoided this, partly just to keep the code as + simple and easy to audit as possible, and partly because it was desirable + to make invocation as simple and foolproof as possible. The more options + that something has, the easier it is to make some mistake with running it + which could have subtle and even serious consequences. But we are at the + point now where there are enough real alternative options which are either + genuinely desirable or needed for some use case, that the balance becomes + weighted toward being able to keep persistent configuration settings in a + file rather than having to spell them out on the command line each time. + + The final straw for making this change now was the inability of systemd to + sanely support the existing simplified configuration interface that was + provided in /etc/default/seedd for the SysV init script. When given the + alternative choices available to us of either adding a shell wrapper to + do what systemd could not, or forcing people to manually edit or override + the systemd unit directly to make any configuration change, this was + clearly the Lesser Evil to embrace if we were going to provide a native + systemd unit for the system daemon. The former gains us nothing over the + existing LSB init script, and the latter would require every user to first + have a solid grasp of all the non-obvious consequences which can come into + play when configuring a system which (according to systemd.directives(7)) + "contains 2464 entries in 13 sections, referring to 241 individual manual + pages" - and where even package maintainers and systemd upstream still + make mistakes that can take a long time for the real consequences to be + noticed. So if we were to provide a systemd unit, it needs to be well + tested and give people few, if any, reasons to ever need to modify it. + + * Preserve existing configuration on package upgrades. The new default + configuration file behaves the same way as the old defaults did. If the + settings in /etc/default/seedd have been customised, then on upgrade we + generate a custom /etc/bit-babbler/seedd.conf implementing the same set + of options. The old customised file content will be retained, and can + be found in /etc/default/seedd.dpkg-old, in case there was anything else + in it which people might also want to keep, but after checking for that + it can safely be removed by the system admin. Nothing from this package + uses files in /etc/default from this version onward. + + * Two systemd unit files are now included in this package, but only one is + enabled by default. + + The seedd.service unit provides the same functionality as the SysV init + script does, and will be used instead of it on systems where systemd is + running as the init process. 
It will start the seedd(1) daemon as soon + as possible during boot, reading its options from the new configuration + file, and if feeding entropy to the kernel it will begin doing so as soon + as the available USB devices are announced to the system by udev. + + The seedd-wait.service oneshot unit is not enabled by default. It provides + a simple sequence point which may be used to ensure that QA checked seed + entropy from available BitBabbler devices can be mixed into the kernel's + pool before other ordinary services which might rely upon it are started. + This is its default behaviour if it is simply enabled, and ordinarily it + will not delay the boot for very long, only until udev announces a device + that we can read some good seed bits from. By default this will time out + after 30 seconds if good entropy cannot be obtained, which should be more + than enough time to get a good seed if that was going to be possible, but + won't completely cripple the system when it is acceptable for it to still + be running without having a working BitBabbler attached. + + Additionally, the seedd-wait.service can also be used to place a harder + constraint on individual services, if there are particular things which + the local admin does not want started at all if good seed entropy was not + obtained. Or it can be configured to divert the boot to a degraded mode + (such as the single-user mode emergency.target) if the availability of + good entropy from a BitBabbler should be a hard requirement for the whole + system. For more details of its use see the BOOT SEQUENCING section of + the seedd(1) manual page. + + -- Ron Lee Thu, 08 Feb 2018 10:26:52 +1030 + bit-babbler (0.7) unstable; urgency=medium * Handle the oddball case of a RHEL/CentOS 6 kernel being used with libusb diff -Nru bit-babbler-0.7/debian/compat bit-babbler-0.8/debian/compat --- bit-babbler-0.7/debian/compat 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/compat 2018-02-07 23:56:52.000000000 +0000 @@ -1 +1 @@ -5 +9 diff -Nru bit-babbler-0.7/debian/control bit-babbler-0.8/debian/control --- bit-babbler-0.7/debian/control 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/control 2018-02-07 23:56:52.000000000 +0000 @@ -2,8 +2,8 @@ Section: admin Priority: optional Maintainer: Ron Lee -Build-Depends: debhelper (>= 5), libusb-1.0-0-dev, libudev-dev [linux-any] -Standards-Version: 4.0.0.1 +Build-Depends: debhelper (>= 9), dh-systemd, libusb-1.0-0-dev, libudev-dev [linux-any] +Standards-Version: 4.1.3.0 Homepage: http://www.bitbabbler.org Package: bit-babbler diff -Nru bit-babbler-0.7/debian/copyright bit-babbler-0.8/debian/copyright --- bit-babbler-0.7/debian/copyright 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/copyright 2018-02-07 23:56:52.000000000 +0000 @@ -1,7 +1,7 @@ The bit-babbler package is: - Copyright (C) 2003 - 2017, Ron Lee + Copyright (C) 2003 - 2018, Ron Lee It is distributed according to the terms of the GNU GPL v2. diff -Nru bit-babbler-0.7/debian/rules bit-babbler-0.8/debian/rules --- bit-babbler-0.7/debian/rules 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/debian/rules 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,6 @@ #!/usr/bin/make -f # -# Copyright 2003 - 2015 Ron Lee. +# Copyright 2003 - 2018 Ron Lee. 
SHELL = /bin/bash @@ -84,7 +84,11 @@ dh_prep $(MAKE) -C $(objdir_shared) install DESTDIR=$(CURDIR)/debian/bit-babbler dh_install debian/bit-babbler-sysctl.conf etc/sysctl.d + dh_systemd_enable seedd.service + dh_systemd_enable --no-enable seedd-wait.service dh_installinit --restart-after-upgrade --name seedd + dh_systemd_start --restart-after-upgrade seedd.service + dh_systemd_start --no-start seedd-wait.service dh_installudev dh_installexamples doc/examples/* libvirt/qemu-hook touch $@ diff -Nru bit-babbler-0.7/doc/examples/seedd.conf bit-babbler-0.8/doc/examples/seedd.conf --- bit-babbler-0.7/doc/examples/seedd.conf 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/doc/examples/seedd.conf 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,179 @@ +# Example configuration file for seedd. + +# Application-wide options are defined in this section. All options set +# here do the same thing as the command line options with the same name. +[Service] + # Fork to the background and run as a daemon (--daemon). + # You should not need (or want) to set this here for a process which is + # expected to be managed by systemd or a SysV init script, you should let + # their configuration control how it is to be placed into the background. + # But you might use this if you want to manually background a separate + # process run directly from the command line. + #daemon + + # Feed entropy to the OS kernel (--kernel). + kernel + + # When listening on an IP socket, don't require the needed network interface + # to already be up. This allows seedd to be started early, without waiting + # for network configuration to occur even when it should listen on a specific + # address. If this option is not enabled, then it is a fatal error for the + # udp-out service or a TCP control-socket to be bound to an address which is + # not already configured when seedd is started. + ip-freebind + + # Provide a UDP socket for entropy output on port 12345 of 127.0.0.1. + #udp-out 127.0.0.1:12345 + + # Where to create the service control socket (used by bbctl and munin etc.). + # May be set to 'none' to not create a control socket at all. If this option + # is used to change the default control-socket address, then you will also + # need to explicitly specify the new address to other tools accessing it too. + #control-socket /run/bit-babbler/seedd.socket + + # Give users in this system group permission to access the control socket. + socket-group adm + + # Request more or less information to be logged about what is going on. + # This may be changed on the fly at runtime with `bbctl --log-verbosity` + # if the control socket is available. + #verbose 3 + + +# Options to configure the entropy collection pool. +# You normally shouldn't need to change or set anything here unless you have +# very special requirements and know exactly what you are doing and why. +#[Pool] + # The size of the internal entropy pool in bytes (--pool-size). + #size 64k + + # The device node used to feed fresh entropy to the OS kernel. + #kernel-device /dev/random + + # The maximum time in seconds before fresh entropy will be added to the OS + # kernel, even when it hasn't drained below its usual refill threshold. + #kernel-refill 60 + + +# Define an entropy collecting group and the size of its pool (--group-size). +# The group_number is the integer given after the PoolGroup: string, and is the +# value used for the Device 'group' option to assign a device to that group. 
+# Pool groups can be used when multiple BitBabbler devices are available to +# optimise for throughput or redundancy. If you only have a single device, +# you probably don't need to define any groups explicitly. By default all +# devices are placed into group 0 unless otherwise configured. +# Any number of pool groups that are needed may be defined. +#[PoolGroup:0] +# size 64k + +#[PoolGroup:1] +# size 64k + + +# This section configures the defaults to use for all BitBabbler devices which +# don't override them in a per-device section (or on the command line). +# All options set here do the same thing as the command line options with the +# same name when passed before any --device-id option. +#[Devices] + # The rate in bits per second at which to clock raw bits out of the device. + #bitrate 2.5M + + # Override the calculated value for the USB latency timer. + #latency 5 + + # Set the number of times to fold the BitBabbler output before adding it to + # the pool. The default for this depends on the device type. White devices + # default to folding just once, Black devices with only a single generator + # will fold 3 times to emulate the four generators on the White devices. + #fold 3 + + # The entropy PoolGroup to add the device to. + #group 0 + + # Select a subset of the generators on BitBabbler devices with multiple + # entropy sources. The argument is a bitmask packed from the LSB, with each + # bit position controlling an individual source, enabling it when set to 1. + # There is usually no good reason to mask generators in normal use, the main + # use case is to verify the output of each generator separately when testing. + #enable-mask 0x0f + + # Configure how devices back off from generating entropy at the maximum rate + # when it is not actually being consumed by anything. When the pool first + # becomes full, we will pause for the 'initial' number of milliseconds, + # doubling that delay each time we wake with the pool still full up to the + # maximum value. As a special case, if the max value is 0 then further reads + # from the device will be suspended indefinitely once the delay reaches 512ms + # until the pool is no longer full. + #idle-sleep 100:60000 + + # The threshold in milliseconds where if we expect the device to be idle for + # longer than that, we will release our claim on it, allowing the OS to put + # it into a low power mode while it is not being used. A value of 0 means + # we will never release the claim unless seedd is halted or it is unplugged. + #suspend-after 0 + + # Enable options for better power saving on lightly loaded systems. + # This is equivalent to using: + # -kernel-refill=3600 --idle-sleep=100:0 --suspend-after=10000 + # Which should be a reasonable balance of allowing the system to suspend as + # much as possbile when idle while keeping sufficient fresh entropy on hand + # for when it is needed. + #low-power + + # Limit the maximum transfer chunk size to 16kB. This is a workaround for + # buggy USB chipsets (and their drivers) which still do exist on some + # motherboards and have trouble when larger transfers are used. The impact + # on transfer speed of this is relatively minimal, but you normally wouldn't + # want to enable this unless you actually see real problems without it. + #limit-max-xfer + + # Disable gating entropy output on the result of quality and health checking. + # You almost never want to use this option at all, and even less so in a + # configuration file for a system daemon. 
The main reason this option exists + # at all is for generating streams which are to be analysed for quality by + # some external test suite - in which case we definitely don't want to be + # filtering any bad blocks of bits from what it would see. But you'd never + # want to do this for any 'normal' use where good entropy is always assumed. + # Because of that, this *only* disables the QA gating on bits output via + # stdout. And we do support it in this configuration file as that may be a + # convenient way to record the configuration which is used for such testing. + #no-qa + + +# Sections with a Device: prefix can be used to both enable and configure +# individual devices. The following is the equivalent of passing the command +# line option --device-id=XYZZY and if any options are specified for this +# section, that is equivalent to passing them after the --device-id option in +# that they will only apply to this device and no others. All the options for +# the [Devices] section above may be used here. +# +# If no [Device:] sections are defined, the default is to operate on all of +# the devices which are available. It is not an error to define a section for +# a device which is not, or may not be, present at any given time. +#[Device:XYZZY] + + +# [Watch:] sections can be used to run our QA testing on some other external +# source of random bits, provided we can read them as if they were a file or +# named pipe or device node. Any number of Watch sections may be defined, +# each just needs its own unique label after the Watch: prefix to identify it. +# +# For example, the following will run QA testing on the /dev/urandom device, +# reading a block of 64kB every 500ms for analysis. The results of that +# analysis are available from the control socket in the same way as the QA +# reports on bits from the BitBabbler devices and the internal pools are. +#[Watch:urandom] + # The path to the device/pipe/file to read bits from. This must be set. + #path /dev/urandom + + # How long to wait before reading the next block of bits. Default 0. + #delay 500 + + # The number of bytes to read between each delay period. Default 64kB. + #block-size 64k + + # The maximum number of bytes to read in total. Default 0 implies reading + # an 'infinite' number of bits for as long as the process keeps running, + # which is probably what you usually want when using this. 
+ #max-bytes 1G + diff -Nru bit-babbler-0.7/doc/examples/seedd.service bit-babbler-0.8/doc/examples/seedd.service --- bit-babbler-0.7/doc/examples/seedd.service 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/doc/examples/seedd.service 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,17 @@ +[Unit] +Description=BitBabbler entropy source daemon +Documentation=man:seedd(1) +DefaultDependencies=no +After=systemd-remount-fs.service +Before=local-fs.target +Conflicts=shutdown.target +ConditionPathExists=/etc/bit-babbler/seedd.conf + +[Service] +Type=notify +ExecStart=/usr/bin/seedd --config /etc/bit-babbler/seedd.conf +KillMode=mixed +CapabilityBoundingSet=CAP_CHOWN CAP_FOWNER CAP_SYS_ADMIN + +[Install] +WantedBy=sysinit.target diff -Nru bit-babbler-0.7/doc/examples/seedd-wait.service bit-babbler-0.8/doc/examples/seedd-wait.service --- bit-babbler-0.7/doc/examples/seedd-wait.service 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/doc/examples/seedd-wait.service 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,70 @@ +[Unit] +Description=Wait for initial kernel entropy seeding +Documentation=man:seedd(1) +DefaultDependencies=no +After=seedd.service + +# Ordinarily, we want to block everything which might run after local-fs.target +# until either we have good seed entropy, or know that we definitely won't be +# getting it from seedd, or we time-out and give up waiting for it. But if this +# (or anything else) failing lands us at the emergency.target, then systemd may +# already consider the local-fs.target has been reached, so if people try to +# enter a normal system mode again with `systemctl default` as it prompts them +# to, then it won't block here anymore, and will start everything else up as if +# this succeeded. But if this still fails then when the timeout expires, they +# will suddenly and without explanation, be thrown back into emergency mode +# again. Unless they did something like ssh in during that window, in which +# case they'll unlock the achievement of being in single-user mode while being +# logged in with multiple users simultaneously. +# +# So to avoid the cognitive dissonance of seeing that they have both tea and +# no-tea, we need to set up a second roadblock at sysinit.target, which should +# prevent starting most things which the emergency.target didn't itself start. +# The only nasty part then is that syslog is disabled by emergency.target, so +# it may be tricky to discover why they keep being thrown back into it, but +# there's not a whole lot we can do here to solve that quirk of systemd. +Before=local-fs.target sysinit.target + +# In theory this should probably be Requires=, since this will fail if seedd +# is not running (though strictly speaking, an instance of seedd that is not +# managed by systemd would still suffice) - but the main reason not to use a +# Requires dependency here is so that this will not automatically be restarted +# any time that seedd.service is. This really only needs to run once at boot, +# and if there are other units which do block hard on this one with a Requires +# dependency of their own, the restart would cascade all the way down through +# those too - and they almost certainly should not be restarted (or stopped +# completely!) just because seedd was. 
On the off-chance this is the desired +# behaviour for some use case, it is still possible to edit this unit, or use +# a drop-in to upgrade this relationship to Requires - but you should remember +# that seedd will be automatically restarted if the package is upgraded, so it +# would be unwise for that to trigger a restart of anything which would be Bad +# if it happens in the middle of a dist-upgrade or similar. +Wants=seedd.service + +# Another option for maximally paranoid systems would be something like the +# following, which would put the system into single-user mode if we were unable +# to seed the kernel sufficiently at boot. But you could also do something a +# bit less aggressive in the same way, starting only a limited set of emergency +# services (but more than just a single-user login) in that case. +#OnFailure=emergency.target +#OnFailureJobMode=replace-irreversibly + + +[Service] +Type=oneshot +RemainAfterExit=yes + +# Wait for at least one QA checked block of bits to seed the OS kernel pool, +# polling for that 4 times/sec, and reporting failure if it could not be done +# in less than 30 seconds. Output enough verbosity to show in the system log +# what we are doing and when it happens. +ExecStart=/usr/bin/bbctl -v --waitfor Kernel:2500:250:30k + +# Belt and braces, have systemd fail it if there was no result in 45 seconds. +# We want to limit the worst case of preventing at least a minimal boot +# proceeding to give admin access if something really went Terribly Wrong. +TimeoutStartSec=45 + + +[Install] +WantedBy=seedd.service diff -Nru bit-babbler-0.7/doc/man/bbcheck.1 bit-babbler-0.8/doc/man/bbcheck.1 --- bit-babbler-0.7/doc/man/bbcheck.1 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/doc/man/bbcheck.1 2018-02-07 23:56:52.000000000 +0000 @@ -159,7 +159,7 @@ entropy than the pad does). .TP -.BI " \-\-enable=" mask +.BI " \-\-enable\-mask=" mask Select a subset of the generators on BitBabbler devices with multiple entropy sources. The argument is a bitmask packed from the LSB, with each bit position controlling an individual source, enabling it when set to 1. As a special diff -Nru bit-babbler-0.7/doc/man/bbctl.1 bit-babbler-0.8/doc/man/bbctl.1 --- bit-babbler-0.7/doc/man/bbctl.1 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/doc/man/bbctl.1 2018-02-07 23:56:52.000000000 +0000 @@ -2,7 +2,7 @@ .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) -.TH BBCTL 1 "February 24, 2015" +.TH BBCTL 1 "January 24, 2018" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: @@ -103,6 +103,45 @@ Change the logging verbosity of the control socket owner. .TP +.BI " \-\-waitfor=" device : passbytes : retry : timeout +This option will make \fBbbctl\fP wait before exiting until the \fBseedd\fP(1) +QA checking reports that at least \fIpassbytes\fP of good entropy have been +obtained from the given \fIdevice\fP. It will check for that every \fIretry\fP +milliseconds, waiting for a maximum of \fItimeout\fP milliseconds before +failing. + +The \fIdevice\fP is a QA test identifier as reported by \fB\-\-scan\fP, and +must be provided, as must the expected \fIpassbytes\fP count. The \fIretry\fP +time is optional, and if not specified it will default to 1000 milliseconds. +If the \fItimeout\fP is 0 (or not explicitly passed), then this will wait for +an unbounded amount of time for the requested condition to occur. 
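As a concrete illustration of the syntax just described, the following is the same invocation used by the example seedd-wait.service unit shown earlier in this diff (the 30k timeout uses the suffix shorthand covered in the next paragraph; 2500 bytes and 250 ms are that unit's choices, not requirements):

    bbctl -v --waitfor Kernel:2500:250:30k
    # exits 0 once the "Kernel" QA test reports at least 2500 good bytes,
    # polling every 250 ms; exits non-zero if that takes longer than 30 seconds
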
+ +The \fIpassbytes\fP, \fIretry\fP, and \fItimeout\fP parameters may be suffixed +with an SI multiplier (e.g. k, M, G) as a convenience, so a \fItimeout\fP of +30k would wait for 30 seconds. + +This option may be passed multiple times to wait for multiple devices, and the +given conditions for each of them will be tested for in the order that they +are specified on the command line. i.e. Later conditions will not be tested +for at all until all prior ones have been met, and the \fItimeout\fP clock for +each test only begins after the previous test has successfully completed. + +When all required conditions pass, \fBbbctl\fP will report success with an exit +code of 0. If a \fItimeout\fP is exceeded, or any other error occurs which +means the test cannot be successfully completed (like passing a \fIdevice\fP +which does not exist, or querying a \fB\-\-control\-socket\fP which no process +provides), then a non-zero exit code will be returned. + +This option mostly exists to make it possible to delay or even prevent other +services from starting until a sufficient amount of entropy has been obtained +to feel comfortable that they can operate securely or as intended. See the +notes on \fBBOOT\ SEQUENCING\fP in \fBseedd\fP(1) for more details on that. +It may be used for other purposes too, but note that \fIpassbytes\fP is an +absolute measure of the number of good bytes seen since \fBseedd\fP was +started, it is not relative to the number that were obtained prior to +executing this request. + +.TP .B \-v, \-\-verbose Make more noise about what is going on internally. It may be passed multiple times to get swamped with even more information. @@ -118,8 +157,12 @@ .SH FILES .TP -.I /var/run/bit\-babbler/seedd.socket -The default control socket path if not explicitly specified. +.I /run/bit\-babbler/seedd.socket +The default \fB\-\-control\-socket\fP path if not explicitly specified. This +may be under \fI/var/run\fP on platforms which don't (yet) provide a \fI/run\fP +top level directory (or a TCP socket on platforms which don't support unix +domain sockets). It is set at compile time by \fBSEEDD_CONTROL_SOCKET\fP. + .SH SEE ALSO .BR seedd (1). diff -Nru bit-babbler-0.7/doc/man/bbvirt.1 bit-babbler-0.8/doc/man/bbvirt.1 --- bit-babbler-0.7/doc/man/bbvirt.1 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/doc/man/bbvirt.1 2018-02-07 23:56:52.000000000 +0000 @@ -2,7 +2,7 @@ .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) -.TH BBVIRT 1 "January 12, 2016" +.TH BBVIRT 1 "January 2, 2018" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: @@ -176,10 +176,10 @@ To string this together, you'll need to ensure all of the following: .IP - 2 -The \fBudev\fP(7) rules from the bit-babbler package are installed. If you +The \fBudev\fP(7) rules from the bit\-babbler package are installed. If you installed this from the Debian packages that should already be done. If you didn't, you will need to install the rules that are found in -\fIdebian/bit-babbler.udev\fP from the source package to a suitable place on +\fIdebian/bit\-babbler.udev\fP from the source package to a suitable place on your system (probably \fI/etc/udev/rules.d\fP). .IP - 2 @@ -190,7 +190,7 @@ .IP - 2 The devices you wish to use in guest machines, and the machines you wish to use them in, are specified in the \fBbbvirt\fP configuration file. 
The default -location for that is \fI/etc/bit-babbler/vm.conf\fP. If you wish to use a +location for that is \fI/etc/bit\-babbler/vm.conf\fP. If you wish to use a different file you will need to pass its location with the \fB\-\-config\fP option in the \fBudev\fP rules, and update the hook script use that file too. The details of what you can put in that file are described in the @@ -207,8 +207,8 @@ overwriting an existing hook, everyone will need to do this step manually. If you have installed the Debian packages, then the example hook script that we've provided for this can -be found in \fI/usr/share/doc/bit-babbler/examples/qemu-hook\fP. If you didn't -it can be found in \fIlibvirt/qemu-hook\fP of the source package. +be found in \fI/usr/share/doc/bit\-babbler/examples/qemu\-hook\fP. If you +didn't it can be found in \fIlibvirt/qemu\-hook\fP of the source package. You will need to install that file as \fI/etc/libvirt/hooks/qemu\fP, or merge its content with the existing \fIqemu\fP file there if you already have that @@ -231,7 +231,7 @@ \fIdevice\fP may be specified by its serial number, its logical address on the bus (in the form \fIbusnum\fP:\fIdevnum\fP, given as decimal integers), or its physical address on the bus (in the form -\fIbusnum\fP-\fIport\fP[\fI.port\fP\ ...]). +\fIbusnum\fP\-\fIport\fP[\fI.port\fP\ ...]). If the action to perform is \fBattach\-all\fP or \fBdetach\-all\fP, then the device(s) to act upon are selected by \fIdomain\fP association instead. If a @@ -306,7 +306,19 @@ any other variables you use, or any other side effects you might cause to happen. Any number of guest domains may be configured in it. -For each guest domain, two variables control the behaviour of \fBbbvirt\fP: +For each guest domain, three variables control the behaviour of \fBbbvirt\fP: + +.TP +.BI DOMAIN_NAME_ domain = guestname +This variable is optional if \fIguestname\fP and \fIdomain\fP are the same. +It must be used if the libvirt guest name contains any characters which would +not be valid for use as a shell variable name (i.e. anything that is not ASCII +a-z, A-Z, 0-9, or the underscore). If set, it indicates that the corresponding +\fBDOMAIN_*_\fP\fIdomain\fP variables shown below are configuration for the +libvirt guest domain \fIguestname\fP instead of one with the name \fIdomain\fP. + +When specifying a domain option for \fBbbvirt\fP to act upon, you may use +either of the \fIdomain\fP or \fIguestname\fP identifiers interchangeably. .TP .BI DOMAIN_URI_ domain = URI @@ -335,12 +347,12 @@ .SH FILES .TP -.I /etc/bit-babbler/vm.conf +.I /etc/bit\-babbler/vm.conf The default configuration file for assigning BitBabbler devices to libvirt managed virtual machine domains. .TP -.I /lib/udev/rules.d/60-bit-babbler.rules +.I /lib/udev/rules.d/60\-bit\-babbler.rules The default \fBudev\fP(7) rules granting direct device access to users in the group \fBbit\-babbler\fP, enabling USB autosuspend when the device is idle, and invoking \fBbbvirt\fP to handle device hotplug for virtual machines. diff -Nru bit-babbler-0.7/doc/man/seedd.1 bit-babbler-0.8/doc/man/seedd.1 --- bit-babbler-0.7/doc/man/seedd.1 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/doc/man/seedd.1 2018-02-07 23:56:52.000000000 +0000 @@ -2,7 +2,7 @@ .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) -.TH SEEDD 1 "May 29, 2017" +.TH SEEDD 1 "Jan 11, 2018" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: @@ -40,19 +40,19 @@ Show all available BitBabbler devices, in detail: - seedd -sv (or \-\-scan \-\-verbose) + seedd \-sv (or \-\-scan \-\-verbose) Output 1 million bytes to a file, drawn from all available devices: - seedd -b 1000000 > random\-bytes.out + seedd \-b 1000000 > random\-bytes.out Stream entropy continuously to \fIstdout\fP (with no control socket): - seedd -o -c none | your\-thing\-reading\-stdin + seedd \-o \-c none | your-thing-reading-stdin Run as a daemon, feeding entropy to the OS kernel pool: - seedd -k -d + seedd \-k \-d To read from only specific devices, add the \fB\-\-device\-id\fP option too. @@ -66,11 +66,31 @@ readable format. .TP -.B " \-\-shell-mr" +.B " \-\-shell\-mr" Scan the system for available BitBabbler devices, reporting them in a machine readable format that is suitable for importing into shell scripts. .TP +.BI "\-C, \-\-config=" file +Read configuration options from a \fIfile\fP. Details of the contents of that +file is described in the \fBCONFIGURATION FILE FORMAT\fP section below. This +option may be passed multiple times to import configuration from multiple +files, and precedence of the options set in them is the same as if they were +passed on the command line at the point where this option is used. Where +option settings are duplicated, the last one seen is the one which will be +applied. Which means if you want options on the command line to override any +setting in the config file(s), they must be passed after this option. + +Options which are cumulative will continue to accumulate wherever they are +seen, so for example, passing an additional \fB\-\-device\-id\fP on the +command line will simply add an extra device, it will not override the use +of any devices defined in the configuration file. But you can still override +any per-device options which may be set there. The only exception to this is +\fB\-\-verbose\fP, where the verbosity level selected on the command line will +always override any setting from a configuration file regardless of the order +they appear in. + +.TP .BI "\-i, \-\-device\-id=" id Select a BitBabbler device to read from by its unique ID. If no devices are explicitly specified then the default is to use all of them (including any @@ -105,13 +125,15 @@ .TP .B \-d, \-\-daemon Fork to the background and run as a daemon process. If this option is not -specified then \fBseedd\fP will remain in the foreground. +specified then \fBseedd\fP will remain in the foreground. This option will +be ignored if \fBseedd\fP is started by \fBsystemd\fP(1) as a service using +the \fBnotify\fP start-up type. .TP .BI "\-b, \-\-bytes=" n Send \fIn\fP bytes of entropy to \fIstdout\fP. The process will exit when that is completed. This option will be ignored if either the \fB\-\-kernel\fP -or \fB\-\-udp-out\fP options are used. A suffix of 'k', 'M', or 'G' will +or \fB\-\-udp\-out\fP options are used. A suffix of 'k', 'M', or 'G' will multiply \fIn\fP by the respective power of two. If this option is not used, then entropy will be output until the process is explicitly terminated (or receives SIGPIPE). Passing this option implies \fB\-\-stdout\fP, and also @@ -119,10 +141,43 @@ passed to enable it. .TP +.B \-o, \-\-stdout +Stream entropy directly to \fIstdout\fP. If the \fB\-\-bytes\fP option is not +used, this will output an unlimited stream of entropy until the process is +halted. + +.TP .B \-k, \-\-kernel Feed entropy directly to the kernel \fI/dev/random\fP pool on demand. 
.TP +.B " \-\-ip\-freebind +If enabled, this option allows binding to an IP address that is non-local or +does not (yet) exist. This permits the \fB\-\-udp\-out\fP and TCP +\fB\-\-control\-socket\fP options to be configured to listen on an IP socket +without requiring the underlying network interface to be up, or the specified +IP address configured, at the time that \fBseedd\fP will try to bind to it. +Which can be useful if you want \fBseedd\fP to be started as early as possible +when the system is booted, without waiting for network configuration to occur. + +This option has no effect if \fBseedd\fP is not configured to listen and +provide a service on any IP address, and it is not supported on all platforms. +A warning will be logged if it is enabled on an unsupported platform, but that +is not a fatal error (however actually attempting to bind to an unconfigured +address quite likely still will be on most sane platforms). + +On Linux, any user may enable this option. On OpenBSD it requires superuser +privilege, and on FreeBSD it requires \fBPRIV_NETINET_BINDANY\fP privileges. +It is a fatal error to attempt to enable this with insufficient privilege. + +If this option is not enabled, then it is a fatal error for the +\fB\-\-udp\-out\fP service or a TCP \fB\-\-control\-socket\fP to be bound to +an address which is not already configured when seedd is started. This is a +configurable option because in different circumstances both behaviours can be +the more desirable choice. There is value in strong sanity checking of an +address which is always supposed to already be available for use. + +.TP .BI "\-u, \-\-udp\-out=" host : port Bind a UDP socket to the given address, which clients can use to request blocks of entropy directly from the internal pool. The \fIhost\fP part can be a DNS @@ -151,52 +206,6 @@ you can use both this and the \fB\-\-kernel\fP option together too. .TP -.B \-o, \-\-stdout -Stream entropy directly to \fIstdout\fP. - -.TP -.BI "\-P, \-\-pool\-size=" n -Specify the size of the internal entropy pool. Entropy read from a BitBabbler -will gather in that pool after health and sanity checking. When multiple -BitBabbler devices are in use, entropy from each group of devices will be -mixed into it. Entropy read from \fIstdout\fP, or the UDP socket, or delivered -to the kernel will be drawn from this pool. Fresh entropy will continue to be -mixed into it while it is not being drained faster than it can be filled. The -default pool size is 64kB, which provides a reasonable balance between what a -single BitBabbler running at 1Mbps can fill completely about twice per second, -and what most reasonable consumers might ever want to draw from it 'instantly'. -There probably aren't many good reasons to make it much larger, but making it -smaller will increase the number of input bits mixed into each output bit if -the pool is not being drained completely faster than it can fill. We do not -rely on this mixing to obtain good quality entropy from each BitBabbler device -but it doesn't hurt to be mixing more good entropy into it while the demand is -exceeded by supply. - -.TP -.BI "\-G, \-\-group\-size=" group_number : size -Set the size of a single pool group. When multiple BitBabbler devices are -available, there is a choice of whether to optimise for throughput or for -redundancy. 
For example a pair of devices both running at 1Mbps can together -produce an effective throughput of 2Mbps of entropy if their streams are -output independently of each other, but they can also be mixed together in -parallel to provide a stronger guarantee of entropy at 1Mbps with the stream -being at least as unpredictable as the most unpredictable device. With more -than two devices a combination of both strategies may be used. - -Devices that are placed in the same group will not add entropy to the pool -until every device in that group has contributed at least \fIsize\fP bytes to -it. If the devices are not running at the same bit rate, the faster device(s) -will continue to mix entropy into the group until every device has contributed. -This option enables configuration of that block size. The \fIgroup_number\fP -is an arbitrary integer identifier (which will be passed to the \fB\-\-group\fP -option for the device(s) to add to it). The \fIsize\fP may be followed by a -suffix of 'k', 'M', or 'G' to multiply it by the respective power of two. The -group size will be rounded up to the nearest power of two. Default is for -groups to be the same size as the pool, but they may be set either smaller or -larger than it if desired. The two values are separated by a colon with no -other space between them. - -.TP .BI "\-c, \-\-control\-socket=" path Set the filesystem path for the query and control socket that may be used to obtain information and statistics about the performance of the BitBabbler @@ -213,6 +222,10 @@ is able to connect to this port will be able to do anything the control socket allows. +If this option is used to change the default control-socket address, then you +will also need to explicitly specify the new address to other tools accessing +it too, like \fBbbctl\fP(1) and the \fBmunin\fP plugin. + .TP .BI " \-\-socket\-group=" group Permit access to the control socket by members of the named \fIgroup\fP. @@ -226,44 +239,77 @@ of a unix domain socket path. .TP -.BI " \-\-watch=" path : delay : block_size : bytes -Monitor an external device. This option does not directly effect the operation -of collecting entropy from BitBabbler devices, or contribute in any way to the -entropy that is output, either to \fIstderr\fP or the kernel. What it does do -is leverage the quality assurance and health checking algorithms, and the trend -monitoring functionality that this software provides, to also permit continuous -supervision of other sources which are expected to be statistically random. +.B \-v, \-\-verbose +Make more noise about what is going on internally. If used (once) with the +\fB\-\-scan\fP option this will show more information about each device, but +otherwise it's mostly only information useful for debugging. It may be passed +multiple times to get swamped with even more information. -For example it can be used to regularly sample from \fI/dev/urandom\fP or even -from \fI/dev/random\fP to ensure the quality of their output is really what you -expect it to be. There's little point to putting the most awesome entropy that -the universe can conjure in, if what's coming out and feeding the applications -that are consuming it is totally predictable garbage. +As an exception to the handling of most cumulative options, this one will +override any previous \fBverbose\fP setting read from a configuration file, +not accumulate additional verbosity on top of that. 
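A convenient way to bootstrap the configuration file that these options read from is the --gen-conf option documented a little further below; the package's upgrade migration does essentially the following, where -k and --socket-group adm are simply the old packaged defaults rather than anything mandatory:

    seedd --gen-conf -k --socket-group adm > /etc/bit-babbler/seedd.conf
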
-If this is used to monitor a limited source of blocking entropy, such as -\fI/dev/random\fP then you'll want to be judicious in selecting the rate of -reading from it, so as not to consume all the available entropy that you were -aiming to gain by feeding it from a BitBabbler in the first. If it's reading -from an 'unlimited' source backed by a PRNG, such as \fI/dev/urandom\fP, then -the only real consideration is how much of the other system resources do you -want to consume in drinking from the firehose. +.TP +.B " \-\-version" +Report the \fBseedd\fP release version. -The \fIpath\fP is the filesystem path to read from, it can be anything which -can be opened and read from like a normal unix file. The \fIdelay\fP is the -amount of time, in milliseconds, to wait between reading blocks of data from -it. The \fIblock_size\fP is the number of bytes to read in a single block -each time the watch process wakes up to read more. The total amount of data -to read can by limited to \fIbytes\fP, once that limit is reached, the watch -process for \fIpath\fP will end (but all other processing will continue as -per normal). +.TP +.B " \-\-gen\-conf" +Output text (to \fIstdout\fP) suitable for use as a configuration file based on +the command line options that were passed. None of those options will actually +be acted upon, \fBseedd\fP will simply exit after it has output a configuration +which would do the same thing as that set of options. + +Any command line options which have no equivalent configuration file option +will simply be ignored (i.e. +.BR \-\-scan ", " \-\-shell\-mr ", " \-\-bytes ", " \-\-stdout ). + +.RB The " \-\-help " and " \-\-version " options +should not be used together with this one, since they too will short-circuit +and exit before the action of this option is performed. + +Any unknown or illegal options passed after this one on the command line will +cause \fBseedd\fP to exit with a failure (non-zero) return code and without +emitting the usual usage help text or any otherwise resulting configuration +options. This allows its use to be safely scripted when the input and output +cannot or will not be immediately examined for proper sanity. -All qualifiers except the \fIpath\fP are optional, and separated by colons -with no other space between them, but all options must be explicitly set up to -the last one that is provided. The \fIdelay\fP may be followed by a suffix of -\&'k', 'M', or 'G' to multiply it by the respective power of 10, or by 'ki', -\&'Mi', or 'Gi' for powers of two if you're into that kind of thing. The -\fIblock_size\fP and \fIbytes\fP options may be similarly suffixed, but like -all good sizes on computers are always a power of two if so. +.TP +.B \-?, \-\-help +Show a shorter version of all of this, which may fit on a single page, FSVO +of page size. + + +.SS Pool options +These options may be used to control the behaviour of the entropy collection +pool. You normally shouldn't need to change or set any of these options +unless you have very special requirements and know exactly what you are doing +and why. + +.TP +.BI "\-P, \-\-pool\-size=" n +Specify the size of the internal entropy pool. Entropy read from a BitBabbler +will gather in that pool after health and sanity checking. When multiple +BitBabbler devices are in use, entropy from each group of devices will be +mixed into it. Entropy read from \fIstdout\fP, or the UDP socket, or delivered +to the kernel will be drawn from this pool. 
Fresh entropy will continue to be +mixed into it while it is not being drained faster than it can be filled. The +default pool size is 64kB, which provides a reasonable balance between what a +single BitBabbler running at 1Mbps can fill completely about twice per second, +and what most reasonable consumers might ever want to draw from it 'instantly'. +There probably aren't many good reasons to make it much larger, but making it +smaller will increase the number of input bits mixed into each output bit if +the pool is not being drained completely faster than it can fill. We do not +rely on this mixing to obtain good quality entropy from each BitBabbler device +but it doesn't hurt to be mixing more good entropy into it while the demand is +exceeded by supply. + +.TP +.BI " \-\-kernel\-device=" path +Set the device node used to feed fresh entropy to the OS kernel. You normally +shouldn't ever need to set this explicitly, as the default should be correct +for the platform we are running on. This option has no effect unless the +\fB\-\-kernel\fP option is being used. .TP .BI " \-\-kernel\-refill=" sec @@ -295,20 +341,28 @@ leaving it at its default setting is probably the right answer. .TP -.B \-v, \-\-verbose -Make more noise about what is going on internally. If used (once) with the -\fB\-\-scan\fP option this will show more information about each device, but -otherwise it's mostly only information useful for debugging. It may be passed -multiple times to get swamped with even more information. - -.TP -.B \-?, \-\-help -Show a shorter version of all of this, which may fit on a single page, FSVO -of page size. +.BI "\-G, \-\-group\-size=" group_number : size +Set the size of a single pool group. When multiple BitBabbler devices are +available, there is a choice of whether to optimise for throughput or for +redundancy. For example a pair of devices both running at 1Mbps can together +produce an effective throughput of 2Mbps of entropy if their streams are +output independently of each other, but they can also be mixed together in +parallel to provide a stronger guarantee of entropy at 1Mbps with the stream +being at least as unpredictable as the most unpredictable device. With more +than two devices a combination of both strategies may be used. -.TP -.B " \-\-version" -Report the \fBseedd\fP release version. +Devices that are placed in the same group will not add entropy to the pool +until every device in that group has contributed at least \fIsize\fP bytes to +it. If the devices are not running at the same bit rate, the faster device(s) +will continue to mix entropy into the group until every device has contributed. +This option enables configuration of that block size. The \fIgroup_number\fP +is an arbitrary integer identifier (which will be passed to the \fB\-\-group\fP +option for the device(s) to add to it). The \fIsize\fP may be followed by a +suffix of 'k', 'M', or 'G' to multiply it by the respective power of two. The +group size will be rounded up to the nearest power of two. Default is for +groups to be the same size as the pool, but they may be set either smaller or +larger than it if desired. The two values are separated by a colon with no +other space between them. .SS Per device options @@ -383,7 +437,7 @@ own output is still passing the QA testing. .TP -.BI " \-\-enable=" mask +.BI " \-\-enable\-mask=" mask Select a subset of the generators on BitBabbler devices with multiple entropy sources. 
The argument is a bitmask packed from the LSB, with each bit position controlling an individual source, enabling it when set to 1. @@ -482,7 +536,11 @@ .B " \-\-low\-power" This is a convenience option, which is equivalent to setting: - \-\-kernel\-refill=3600 \-\-idle-sleep=100:0 \-\-suspend\-after=10000 +.nh +.nf + \-\-kernel\-refill=3600 \-\-idle\-sleep=100:0 \-\-suspend\-after=10000 +.fi +.hy And which in turn means: @@ -495,7 +553,7 @@ are woken by the kernel needing entropy or by the timeout above expiring, or until something else consumes entropy from the output pool - such as from the UDP socket if that is enabled). This is based on doubling the \fIinitial\fP -\fB\-\-idle-sleep\fP timeout each time the output pool remains full, until we +\fB\-\-idle\-sleep\fP timeout each time the output pool remains full, until we exceed the minimum amount of time that really will perform a sleep (512ms), and then sleeping until explicitly woken again after that. @@ -515,25 +573,12 @@ more oriented toward). At the very least it should be a reasonable starting point to begin experimenting from on low power systems. -.TP -.B " \-\-no\-qa" -Disable gating entropy output on the result of quality and health checking. -You pretty much never want to use this unless you are generating streams to -\fIstdout\fP for no other reason than to analyse their quality with some other -tool, such as \fBdieharder\fP(1) or the NIST test suite or similar. For that -type of use we definitely don't want to be filtering out blocks which have -already failed our own internal quality analysis, otherwise the value of such -testing will be almost as tainted as that of the people who say "after whitening -our RNG with SHA-1 it now passes all of the statistical tests perfectly!", and -there's already more than enough fossils in that tarpit. - -It is not possible to disable this for data which is passed directly to the -kernel entropy pool, there is absolutely no reason to ever want to do that, -and this does not actually disable the QA checks from being performed (so the -results of them will still be seen in the monitoring output and can generate -external alerts if this mode was entered 'by accident'). It just permits any -failing blocks to still pass through to \fIstdout\fP, so other tools can heap -all the scorn on the output that it deserves if it is failing. +Note that although this option may be specified per-device, the +\fB\-\-kernel\-refill\fP time is a global option, and that setting will be +applied if the configuration for any device uses this option, even if that +device isn't currently available for use. That normally shouldn't be a +problem though, since the question of whether minimising power use is more +important than other considerations is usually a system-wide one anyway. .TP .B " \-\-limit\-max\-xfer" @@ -559,6 +604,286 @@ present only a very small number of systems are still known to be affected, and that number should continue to decrease over time. +.TP +.B " \-\-no\-qa" +Disable gating entropy output on the result of quality and health checking. +You pretty much never want to use this unless you are generating streams to +\fIstdout\fP for no other reason than to analyse their quality with some other +tool, such as \fBdieharder\fP(1) or the NIST test suite or similar. 
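For that kind of external analysis run, the capture step might look something like the following sketch (the output filename is arbitrary; -c none and -b are the same options shown in the EXAMPLES section above, with -b bounding how much raw data is written):

    seedd -c none --no-qa -b 1G > raw-sample.bin
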
For that +type of use we definitely don't want to be filtering out blocks which have +already failed our own internal quality analysis, otherwise the value of such +testing will be almost as tainted as that of the people who say "after whitening +our RNG with SHA-1 it now passes all of the statistical tests perfectly!", and +there's already more than enough fossils in that tarpit. + +It is not possible to disable this for data which is passed directly to the +kernel entropy pool, there is absolutely no reason to ever want to do that, +nor does it disable the gating for bits requested from the UDP socket +interface. It does not actually disable the QA checks from being performed +(so the results of them will still be seen in the monitoring output and can +generate external alerts if this mode was entered 'by accident'). It just +permits any failing blocks to still pass through to \fIstdout\fP, so other +tools can heap all the scorn on the output that it deserves if it is failing. + + +.SS Extended QA options +Since we already have some high quality QA analysis running on the output of +the BitBabbler devices, it makes sense to also be able to use that to sample, +analyse, and monitor the quality of entropy from other independent sources +and downstream points that end user applications may be obtaining it from too. + +.TP +.BI " \-\-watch=" path : delay : block_size : bytes +Monitor an external device. This option does not directly effect the operation +of collecting entropy from BitBabbler devices, or contribute in any way to the +entropy that is output, either to \fIstderr\fP or the kernel. What it does do +is leverage the quality assurance and health checking algorithms, and the trend +monitoring functionality that this software provides, to also permit continuous +supervision of other sources which are expected to be statistically random. + +For example it can be used to regularly sample from \fI/dev/urandom\fP or even +from \fI/dev/random\fP to ensure the quality of their output is really what you +expect it to be. There's little point to putting the most awesome entropy that +the universe can conjure in, if what's coming out and feeding the applications +that are consuming it is totally predictable garbage. + +If this is used to monitor a limited source of blocking entropy, such as +\fI/dev/random\fP then you'll want to be judicious in selecting the rate of +reading from it, so as not to consume all the available entropy that you were +aiming to gain by feeding it from a BitBabbler in the first. If it's reading +from an 'unlimited' source backed by a PRNG, such as \fI/dev/urandom\fP, then +the only real consideration is how much of the other system resources do you +want to consume in drinking from the firehose. + +The \fIpath\fP is the filesystem path to read from, it can be anything which +can be opened and read from like a normal unix file. The \fIdelay\fP is the +amount of time, in milliseconds, to wait between reading blocks of data from +it. The \fIblock_size\fP is the number of bytes to read in a single block +each time the watch process wakes up to read more. The total amount of data +to read can by limited to \fIbytes\fP, once that limit is reached, the watch +process for \fIpath\fP will end (but all other processing will continue as +per normal). + +All qualifiers except the \fIpath\fP are optional, and separated by colons +with no other space between them, but all options must be explicitly set up to +the last one that is provided. 
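As one concrete shape for the qualifier string just described, the values below mirror the [Watch:urandom] example shipped in the new seedd.conf (a 64kB block every 500 ms, stopping after 1GB; the suffix shorthand is covered in the next paragraph):

    seedd -k -d --watch=/dev/urandom:500:64k:1G
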
The \fIdelay\fP may be followed by a suffix of +\&'k', 'M', or 'G' to multiply it by the respective power of 10, or by 'ki', +\&'Mi', or 'Gi' for powers of two if you're into that kind of thing. The +\fIblock_size\fP and \fIbytes\fP options may be similarly suffixed, but like +all good sizes on computers are always a power of two if so. + + +.SH CONFIGURATION FILE FORMAT +In addition to use of the command line options, \fBseedd\fP configuration may +be supplied by "INI" format files encoded as \fBSections\fP, \fBOptions\fP and +\fBValues\fP. Since there is no standard definition for that format, the +general rules applicable here are as follows: + +A \fBSection\fP definition begins on a line where its name is enclosed in +square brackets. The Section name itself may contain any characters except +square brackets. Any characters following the closing square bracket on the +same line will simply be ignored, but a well formed file should not rely on +that always remaining true. + +All following \fBOption\fP/\fBValue\fP pairs belong to that Section until the +next Section header occurs. Option names may include any characters except +whitespace. Leading and trailing whitespace around Option names and Values is +ignored. Internal whitespace in Option values is preserved. Options must be +defined on a single line, and everything (except leading and trailing +whitespace) following the Option name up to the end of the line is part of the +Value. Quote characters (of any sort) have no special meaning and will be +included as a literal part of the value. + +Comments must appear on their own line, with the first (non-whitespace) +character of the line being '#'. + +If Options are duplicated in a configuration file, either in a single file or +when multiple configuration files are used, then any options which are repeated +will override the values which were set for them previously. Options specified +on the command line have a similar precedence, with the exception of +\fB\-\-verbose\fP, they must come after all \fB\-\-config\fP files if they are +to override them and not be overridden by them. + +The following Sections and Options may be used. See the equivalent command +line options (as indicated) for a full description of their behaviour. In +most cases, the option names are the same as the long form of the command line +option. It is an error for an unknown Section or Option to be present. + + +.SS [Service] section +The \fBService\fP section is used to configure process-wide behaviour using +the following options: + +.TP 4 +.B daemon +.br +Fork to the background and run as a daemon process (\fP\-\-daemon\fP). + +.TP +.B kernel +.br +Feed entropy to the OS kernel (\fB\-\-kernel\fP). + +.TP +.B ip\-freebind +.br +Allow binding to an IP address that is non-local or does not (yet) exist +(\fB\-\-ip\-freebind\fP). + +.TP +.BI udp\-out " host" : port +Provide a UDP socket for entropy output (\fB\-\-udp\-out\fP). + +.TP +.BI control\-socket " path" +Where to create the service control socket (\fB\-\-control\-socket\fP). + +.TP +.BI socket\-group " group" +Give users in this system group permission to access the control socket +(\fB\-\-socket\-group\fP). + +.TP +.BI verbose " level" +Set the logging verbosity level. A \fIlevel\fP of 2 is equivalent to using +\fB\-vv\fP (\fB\-\-verbose\fP). + + +.SS [Pool] section +The \fBPool\fP section is used to configure the entropy collection pool. 
You +normally shouldn't need to change or set any of these options unless you have +very special requirements and know exactly what you are doing and why. + +.TP 4 +.BI size " n" +The size of the internal entropy pool in bytes (\fB\-\-pool\-size\fP). + +.TP +.BI kernel\-device " path" +The device node used to feed fresh entropy to the OS kernel +(\fB\-\-kernel\-device\fP). This option has no effect unless the +\fB\-\-kernel\fP option is being used. + +.TP +.BI kernel\-refill " sec" +The maximum time in seconds before fresh entropy will be added to the OS +kernel, even when it hasn't drained below its usual refill threshold +(\fB\-\-kernel\-refill\fP). This option has no effect unless the +\fB\-\-kernel\fP option is being used. + + +.SS [PoolGroup:\fIn\fP] sections +Defines an entropy collecting group and the size of its pool +(\fB\-\-group\-size\fP). The group number \fIn\fP is an integer value used by +the per-device \fB\-\-group\fP option to assign a device to that group. Any +number of groups may be defined, but each must have a unique value of \fIn\fP. + +.TP 4 +.BI size " n" +The size of the group pool (\fB\-\-group\-size\fP). + + +.SS [Devices] section +The \fBDevices\fP section configures the defaults to use for all BitBabbler +devices which don't override them in a per-device section (or on the command +line). All options set here do the same thing as the command line options +with the same name when passed before any \fB\-\-device\-id option. + +.TP 4 +.BI bitrate " Hz" +The rate in bits per second at which to clock raw bits out of the device +(\fB\-\-bitrate\fP). + +.TP +.BI latency " ms" +Override the calculated value for the USB latency timer (\fB\-\-latency\fP). + +.TP +.BI fold " n" +Set the number of times to fold the BitBabbler output before adding it to the +pool (\fB\-\-fold\fP). + +.TP +.BI group " n" +The entropy \fB[PoolGroup:\fP\fIn\fP\fB]\fP to add the device to +(\fB\-\-group\fP). + +.TP +.BI enable\-mask " mask" +Select a subset of the generators on BitBabbler devices with multiple entropy +sources. The argument is an integer bitmask packed from the LSB, with each +bit position controlling an individual source, enabling it when set to 1 +(\fB\-\-enable\-mask\fP). + +.TP +.BI idle\-sleep " initial" : max +Configure how devices back off from generating entropy at the maximum rate when +it is not actually being consumed by anything (\fB\-\-idle\-sleep\fP). + +.TP +.BI suspend\-after " ms" +The threshold in milliseconds where if we expect the device to be idle for +longer than that, we will release our claim on it, allowing the OS to put it +into a low power mode while it is not being used (\fB\-\-suspend\-after\fP). + +.TP +.B low\-power +Enable options for better power saving on lightly loaded systems +(\fB\-\-low\-power\fP). + +.TP +.B limit\-max\-xfer +Limit the maximum transfer chunk size to 16kB (\fB\-\-limit\-max\-xfer\fP). + +.TP +.B no\-qa +Disable gating entropy output on the result of quality and health checking +(\fB\-\-no\-qa\fP). + + +.SS [Device:\fIid\fP] sections +Sections with a \fBDevice:\fP prefix can be used to both enable and configure +individual devices. It is the equivalent of passing +.BI \-\-device\-id= id +on the command line. If any options are specified for this section, that is +equivalent to passing them after the \fB\-\-device\-id\fP option in that they +will only apply to this device and no others. All the options for the +\fB[Devices]\fP section above may be used here. 
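Purely as an illustrative sketch (the device serial shown is a placeholder, and the values are invented for the example rather than being package defaults), a configuration file combining the sections described above might look like:

    [Service]
    daemon
    kernel
    verbose 1

    [Pool]
    kernel-refill 3600

    [PoolGroup:1]
    size 65536

    [Devices]
    fold 3

    [Device:XXXXXXXX]
    group 1

The same settings could equally be split across several \fB\-\-config\fP files, with any repeated options in later files overriding the earlier ones as described above.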
+ +If no \fB[Device:]\fP sections are defined, the default is to operate on all of +the devices which are available. It is not an error to define these sections +for devices which are not, or may not be, present at any given time. + + +.SS [Watch:\fIid\fP] sections +Sections of this type can be used to run our QA testing on some other external +source of random bits, provided we can read them as if they were a file or +named pipe or device node. Any number of \fB[Watch:]\fP sections may be +defined, each just needs its own unique label after the \fBWatch:\fP prefix to +identify it. The \fIid\fP has no use or meaning other than to make each watch +section name unique. The options below correspond to the component parts of +the argument passed with \fB\-\-watch\fP on the command line. + +.TP 4 +.BI path " device" +The path to the device/pipe/file to read bits from. This option must be set +for every watch section. + +.TP +.BI delay " ms" +How long to wait before reading the next block of bits, in milliseconds. +Default is 0. + +.TP +.BI block\-size " bytes" +The number of bytes to read between each delay period. Default is 64k. + +.TP +.BI max\-bytes " bytes" +The maximum number of bytes to read in total. Default is 0 which implies +reading an 'infinite' number of bits for as long as the process keeps running, +which is probably what you usually want when using this. + .SH CONTINUOUS MONITORING The query and control socket enables device performance and QA statistics to be @@ -590,28 +915,197 @@ .nh \fI/etc/munin/plugin\-conf.d/bit\-babbler\fP .hy -(where they should be set if desired). The \fBmunin-node\fP service needs to +(where they should be set if desired). The \fBmunin\-node\fP service needs to be restarted for changes to its plugins to take effect. +.SH BOOT SEQUENCING +When \fBseedd\fP is being used to feed entropy to the OS kernel, there are two +main considerations to deal with. On modern systems where the kernel random +source is used by almost every process, if only for ASLR, we want that to be +well seeded with the best entropy available as early as is possible. And +ideally we also want any services which do have a critical need for high +quality entropy to not be started until that can be guaranteed by proper QA +testing of the entropy stream. + +Historically, the mechanics of ensuring all that was not just an OS-specific +detail, but it also varied, sometimes greatly, even between different flavours +of the various OS platforms. So it was up to the higher level packaging for +each OS variant, and the users of them, to implement something appropriate for +each environment. That is still largely true, but for people using Linux +distributions where the \fBinit\fP(1) process is provided by systemd now, we +can in theory provide some defaults which should suit most users and still be +fairly easily customisable for specific use case requirements as well. Since +configuring systemd correctly for anything not completely trivial is still +something of a black art, which a lot of services (and even the systemd +provided ones) seem to still get wrong to some degree or another, it does make +sense for us to provide a tested configuration - along with some guidance to +users of other platforms and init systems about what they should be aiming +for. 
What follows is a description of the boot sequencing options implemented +for a systemd environment, but the general requirements of that should be +broadly applicable too since they don't do anything magical which couldn't be +done in another alternative environment. + +By default we provide two systemd "service units" to implement the requirements +outlined above. + + +.SS The seedd daemon service (\fIseedd.service\fP) +This unit provides the ordinary functionality of ensuring that \fBseedd\fP is +started at the earliest possible time in the boot sequence where its +requirements are able to be met. For that reason its requirements should be +(and deliberately have been) kept as minimal as reasonably possible. It needs +access to some low-level system libraries, to its \fIseedd.conf\fP +configuration file (though that could be eliminated at the cost of some +user-friendliness by hardcoding the options to run it with in the unit itself), +to a writable directory where it will create its control-socket, and to the +system device files where the BitBabbler and kernel random devices will be +found. + +Since the BitBabbler devices can be hotplugged, we don't actually need to wait +for them to be present to start this - and in practice with the current unit +configuration, \fBseedd\fP is almost certain to be started before the USB +devices have been probed and announced to the system, or before even \fBudev\fP +is running to notify it about them. This means it will be ready to use them at +the soonest possible moment that they do become available. + +This unit is installed by default, but it must still be explicitly enabled, +either by the distro packaging (which we do recommend does this, and which the +Debian packages indeed do), or by the local admin if they manually installed +this software from source themselves. It is the equivalent of what the SysV +init script provided for Debian based systems will do on systems which aren't +using systemd as their init system. It is always safe for \fBseedd\fP to be +running even when no BitBabbler devices are currently available in the system, +it just won't do much unless also configured to watch some external source. + + +.SS Waiting for initial kernel seeding (\fIseedd\-wait.service\fP) +This optional unit is also installed by default, but it generally should +\fBnot\fP be enabled automatically by distro packaging, only at the explicit +request of a local admin. It provides a boot sequence point with some more +complete and useful guarantees: + +.HP 3 +\ - That \fBseedd\fP has successfully been started and is running. + +.HP 3 +\ - That at least one BitBabbler device (or more depending on the configuration +used for \fBseedd\fP) is available and operating correctly, and able to provide +the system with fresh QA checked entropy. + +.HP 3 +\ - That good quality entropy obtained from the available device(s) has been +provided as initial seed material to the OS kernel. + +.PP +If simply enabled on its own, this unit will delay starting anything which is +scheduled to be started later than \fBseedd\fP in the boot sequence (or more +specifically, anything which wouldn't be started until after all of the local +mount points in \fI/etc/fstab\fP have been mounted - which should be before +most services that aren't part of the early boot initialisation), until the +three conditions above have been met. 
It will wait for up to 30 seconds for +that to occur before timing out and entering a failed state, after which the +rest of the boot sequence will then still continue normally. + +This provides a reasonable compromise between a guarantee that good entropy +will actually be used if it is possible to obtain it, and not rendering the +system completely unable to boot if for some reason it is not. If you wish +to enable it, you can do that with: + + # systemctl enable seedd\-wait.service + +If you wish to change the timeout period, you will need to edit or override +this unit to change either or both of the timeout used in \fB\-\-waitfor\fP +and the \fBTimeoutStartSec\fP option. It should be long enough for devices to +become available, and have enough entropy read from them to be QA checked for +use as early seed material, but not so long that booting is delayed needlessly +when it is clear that nothing is likely to change if we just wait longer. + + +.SS When failure is not an option +If a stronger guarantee than the above really is needed, either system-wide or +just for particular services, then declaring a \fBRequires\fP relationship with +this unit will prevent anything which does so from starting, both before this +task has completed and if this task should fail. For example if you wanted to +prevent \fBapache2\fP(8) from starting if this unit's checks should fail, then +you could do: + + # systemctl add\-requires apache2.service seedd\-wait + +Or equivalently (which is what the above command does): + +.nh +.nf + # mkdir /etc/systemd/system/apache2.service.requires + # ln \-s /lib/systemd/system/seedd\-wait.service /etc/systemd/system/apache2.service.requires +.fi +.hy + +Which will work for older systems where \fBsystemctl\fP does not yet support +the \fBadd\-requires\fP command, and with generated units (such as those for +services which still provide only a SysV init script), which at the time of +writing \fBsystemctl\fP failed to support. Any number of other units may have +a \fBRequires\fP dependency retrofitted like this, or may even include it in +their own unit file if appropriate. + + +.SS Go big or go visit the server room +If you want the strongest guarantee for all normal services running on the +system, so that none of them will be started if this initial boot test fails, +then you can do something like the following, which if it fails will put the +system into a minimal single-user mode with only an emergency admin shell +available to someone with console access who knows the root password: + +.nh +.nf + # mkdir /etc/systemd/system/seedd\-wait.service.d/ + # cat > /etc/systemd/system/seedd\-wait.service.d/failure.conf < +// Copyright 2015 - 2018, Ron #ifndef _BB_ALIGNED_RECAST_H #define _BB_ALIGNED_RECAST_H @@ -21,6 +21,7 @@ // Return true if pointer p is aligned to some multiple of S. template< size_t S > + BB_CONST bool IsAligned( const void *p ) { //{{{ @@ -46,6 +47,7 @@ // Return true if pointer p is aligned to some multiple of the alignment of type T. template< typename T > + BB_CONST bool IsAligned( const void *p ) { return IsAligned< alignment_of::value >( p ); @@ -53,6 +55,14 @@ + // For some reason GCC 4.9.2 thinks aligned_recast() can be declared const, + // but that seems wrong because it can throw and calls stringprintf, and + // empirically, if we declare these with the const attribute then the unit + // tests fail ... so squelch the warning. 
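    // A reference sketch (hypothetical functions, not part of this header) of
    // what __attribute__ ((const)) promises, and why a throwing, string-building
    // function like aligned_recast() is not a candidate for it: a 'const'
    // function may examine nothing but its argument values, and may have no
    // effect other than producing its return value, so the compiler is free to
    // merge or hoist repeated calls to it.

    // Fine: only the argument value itself is examined, no memory is touched.
    __attribute__ ((const))
    static bool example_is_pow2( unsigned long n )
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    // Not valid for 'const' ('pure' at best): it reads memory through the
    // pointer, and anything which may throw or allocate is no candidate at all.
    static unsigned char example_first_byte( const unsigned char *p )
    {
        return *p;
    }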
+ #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpragmas" + #pragma GCC diagnostic ignored "-Wsuggest-attribute=const" + // Safe cast back to a type with increased alignment. // // This will cast pointer p, to type T, after asserting that it is already @@ -82,6 +92,7 @@ return aligned_recast< T, alignment_of::value >( p ); } + #pragma GCC diagnostic pop } #endif // _BB_ALIGNED_RECAST_H diff -Nru bit-babbler-0.7/include/bit-babbler/client-socket.h bit-babbler-0.8/include/bit-babbler/client-socket.h --- bit-babbler-0.7/include/bit-babbler/client-socket.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/client-socket.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2004 - 2015, Ron +// Copyright 2004 - 2018, Ron #ifndef _BB_CLIENT_SOCKET_H #define _BB_CLIENT_SOCKET_H @@ -7,8 +7,6 @@ #include #include -#include - namespace BitB { @@ -156,7 +154,7 @@ Log<4>( "ClientSock::read( %zu ): %zu bytes at %zu\n", size, n, m_fill ); - m_fill += n; + m_fill += size_t(n); } } //}}} @@ -215,7 +213,7 @@ if( n < 0 ) throw SocketError( _("ClientSock::write( %zu ): failed"), len ); - return n; + return size_t(n); } //}}} @@ -244,7 +242,7 @@ if( w == 0 ) throw Error( _("ClientSock::SendRequest: write EOF") ); - c -= w; + c -= size_t(w); } } //}}} diff -Nru bit-babbler-0.7/include/bit-babbler/control-socket.h bit-babbler-0.8/include/bit-babbler/control-socket.h --- bit-babbler-0.7/include/bit-babbler/control-socket.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/control-socket.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2004 - 2016, Ron +// Copyright 2004 - 2018, Ron #ifndef _BB_CONTROL_SOCKET_H #define _BB_CONTROL_SOCKET_H @@ -49,7 +49,7 @@ if( w == 0 ) throw Error( _("ControlSock::Connection( %d ): write EOF"), m_fd ); - c -= w; + c -= size_t(w); } } //}}} @@ -71,7 +71,7 @@ { std::string id; - if( json != (Json*)NULL ) + if( json.IsNotNULL() ) id = json->Get(2); send_response( "[\"ReportStats\"," + stringprintf("%zu,", token) @@ -83,7 +83,7 @@ { std::string id; - if( json != (Json*)NULL ) + if( json.IsNotNULL() ) id = json->Get(2); send_response( "[\"GetRawData\"," + stringprintf("%zu,", token) @@ -93,8 +93,8 @@ if( cmd == "SetLogVerbosity" ) { - if( json != (Json*)NULL ) - opt_verbose = json[2]; + if( json.IsNotNULL() ) + opt_verbose = json[2]->As(); Log<0>( "Log verbosity is now %d\n", opt_verbose ); @@ -138,7 +138,7 @@ if( json.RootType() == Json::ArrayType ) { try { - process_request( req, json[0], json[1], json ); + process_request( req, json[0], json[1]->As(), json ); return; } catch( const abi::__forced_unwind& ) { throw; } @@ -184,7 +184,7 @@ Log<3>( "ControlSock::Connection( %d ): read %zu bytes at %zu\n", m_fd, n, f ); - f += n; + f += size_t(n); size_t b = 0; @@ -246,6 +246,9 @@ Connection::Handle c = static_cast(p); + // Drop the 'virtual handle' from the ctor, we have a real one now. + c->Unref(); + try { c->do_connection_thread(); } @@ -288,6 +291,14 @@ Log<2>( "+ ControlSock::Connection( %d )\n", fd ); + // Bump the refcount until the thread is started, otherwise we may + // lose a race with this Connection being released by the caller + // before the thread can take its handle from the raw pointer. + // Think of it as a virtual Handle passed with pthread_create. 
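    // A standalone sketch of the same hand-off idiom using invented names (this
    // is not the Connection class, and it simplifies the real code by having
    // the thread keep the handed-over reference rather than taking a RefPtr of
    // its own first): the creator takes one extra reference on behalf of the
    // thread it is about to start, so the object cannot be destroyed in the
    // window before the new thread claims it.

    #include <pthread.h>

    struct ExampleWorker
    {
        int m_count;    // minimal intrusive refcount standing in for RefCounted

        ExampleWorker() : m_count( 1 ) {}

        void Ref()   { __sync_add_and_fetch( &m_count, 1 ); }
        void Unref() { if( __sync_sub_and_fetch( &m_count, 1 ) == 0 ) delete this; }

        static void *thread_main( void *p )
        {
            ExampleWorker *w = static_cast<ExampleWorker*>( p );

            // ... do the actual work here ...

            w->Unref();     // release the reference handed over by Start()
            return 0;
        }

        void Start()
        {
            Ref();          // keep *this alive until thread_main owns it

            pthread_t t;
            if( pthread_create( &t, 0, thread_main, this ) != 0 )
                Unref();    // the thread will never run, so take the reference back
            else
                pthread_detach( t );
        }
    };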
+ Ref(); + + // We don't need to Unref() if this fails, because we'll throw + // and it will never have been constructed to be destroyed ... int ret = pthread_create( &m_connectionthread, GetDefaultThreadAttr(), connection_thread, this ); if( ret ) @@ -412,7 +423,8 @@ protected: void ListenSocket( int domain, int type, int protocol, - const struct sockaddr *addr, socklen_t addrlen ) + const struct sockaddr *addr, socklen_t addrlen, + bool freebind = false ) { //{{{ static const int LISTEN_BACKLOG = 5; @@ -423,6 +435,9 @@ throw SocketError( _("ControlSock( %s ): failed to create socket"), m_id.c_str() ); + if( freebind ) + EnableFreebind( m_fd, stringprintf("ControlSock( %s )", m_id.c_str()) ); + if( bind( m_fd, addr, addrlen ) == -1 ) throw SocketError( _("ControlSock( %s ): failed to bind socket"), m_id.c_str() ); @@ -613,7 +628,7 @@ throw SystemError( _("ControlSock( %s ): failed to chmod %.4o '%s'"), path.c_str(), dirmode, dir.c_str() ); - if( using_group() && chown( dir.c_str(), -1, m_gid ) ) + if( using_group() && chown( dir.c_str(), uid_t(-1), m_gid ) ) throw SystemError( _("ControlSock( %s ): failed to chown '%s' to group %s."), path.c_str(), dir.c_str(), m_group.c_str() ); @@ -735,7 +750,7 @@ throw SystemError( _("ControlSock( %s ): failed to chmod %.4o socket"), path.c_str(), sockmode ); - if( using_group() && chown( path.c_str(), -1, m_gid ) ) + if( using_group() && chown( path.c_str(), uid_t(-1), m_gid ) ) throw SystemError( _("ControlSock( %s ): failed to chown socket to group %s."), path.c_str(), m_group.c_str() ); @@ -848,7 +863,7 @@ typedef RefPtr< ControlSockTCP > Handle; - ControlSockTCP( const std::string &addr ) + ControlSockTCP( const std::string &addr, bool freebind = false ) : ControlSock( addr ) { Log<2>( "+ ControlSockTCP( '%s' )\n", addr.c_str() ); @@ -862,7 +877,7 @@ addr.c_str(), sa.addr.any.sa_family ); ListenSocket( sa.addr.any.sa_family, sa.addr_type, sa.addr_protocol, - &sa.addr.any, sa.addr_len ); + &sa.addr.any, sa.addr_len, freebind ); start_server_thread(); } @@ -871,14 +886,15 @@ static inline ControlSock::Handle CreateControlSocket( const std::string &addr, - const std::string &group = std::string() ) + const std::string &group = std::string(), + bool freebind = false ) { //{{{ if( addr == "none" ) return NULL; if( addr.find("tcp:") == 0 ) - return new ControlSockTCP( addr.substr(4) ); + return new ControlSockTCP( addr.substr(4), freebind ); return new ControlSockUnix( addr, group ); diff -Nru bit-babbler-0.7/include/bit-babbler/exceptions.h bit-babbler-0.8/include/bit-babbler/exceptions.h --- bit-babbler-0.7/include/bit-babbler/exceptions.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/exceptions.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2003 - 2017, Ron +// Copyright 2003 - 2018, Ron #ifndef _BB_EXCEPTIONS_H #define _BB_EXCEPTIONS_H @@ -14,20 +14,33 @@ #define BB_NORETURN __attribute__ ((noreturn)) +#define BB_CONST __attribute__ ((const)) +#define BB_PURE __attribute__ ((pure)) #if EM_PLATFORM_MSW - // The format checking misfires badly in mingw-w64 4.9.2-21+15.4 - // and complains about format characters that are supported, so - // just disable it there until that gets fixed. - #define BB_PRINTF_FORMAT( fmt, arg1, ... 
) + // We force the gnu_printf checking here (instead of letting it assume printf + // checking should use ms_printf), because we already rely on other mingw + // extensions, and with _GNU_SOURCE defined then (on C++) mingw will enable + // __USE_MINGW_ANSI_STDIO to support the full set of POSIX format characters. + #define BB_PRINTF_FORMAT( fmt, arg1, ... ) \ + __attribute__ ((format (__gnu_printf__,fmt,arg1), ##__VA_ARGS__)) + + // Likewise we use the gnu_strftime checking here because otherwise it will + // warn about %T and %F (which we do use, and which the msvcrt.dll does not + // implement), but timeprintf will convert them to equivalents it is ok with. + #define BB_STRFTIME_FORMAT( fmt, arg1, ... ) \ + __attribute__ ((format (__gnu_strftime__,fmt,arg1), ##__VA_ARGS__)) #else #define BB_PRINTF_FORMAT( fmt, arg1, ... ) \ __attribute__ ((format (__printf__,fmt,arg1), ##__VA_ARGS__)) + #define BB_STRFTIME_FORMAT( fmt, arg1, ... ) \ + __attribute__ ((format (__strftime__,fmt,arg1), ##__VA_ARGS__)) + #endif @@ -55,6 +68,7 @@ namespace BitB { + BB_PRINTF_FORMAT(2,0) static inline int Vasprintf( char **strp, const char *format, va_list arglist ) { //{{{ @@ -105,6 +119,7 @@ m_msg = msg; } + BB_PRINTF_FORMAT(2,0) void SetMessage( const char *format, va_list args ) throw() { char *msg = NULL; @@ -128,6 +143,11 @@ va_end( arglist ); } + void AppendMessage( const std::string &msg ) throw() + { + m_msg.append( msg ); + } + const char *what() const throw() { return m_msg.empty() ? "Unspecified BitB::Exception" : m_msg.c_str(); @@ -183,28 +203,24 @@ SystemError( const char *format, ... ) throw() : m_errno( errno ) { - va_list arglist; - std::string msg( format ); - - msg.append( ": " ).append( strerror(m_errno) ); - + va_list arglist; va_start( arglist, format ); - SetMessage( msg.c_str(), arglist ); + SetMessage( format, arglist ); va_end( arglist ); + + AppendMessage( std::string(": ") + strerror(m_errno) ); } BB_PRINTF_FORMAT(3,4) SystemError( int code, const char *format, ... ) throw() : m_errno( code ) { - va_list arglist; - std::string msg( format ); - - msg.append( ": " ).append( strerror(m_errno) ); - + va_list arglist; va_start( arglist, format ); - SetMessage( msg.c_str(), arglist ); + SetMessage( format, arglist ); va_end( arglist ); + + AppendMessage( std::string(": ") + strerror(m_errno) ); } diff -Nru bit-babbler-0.7/include/bit-babbler/ftdi-device.h bit-babbler-0.8/include/bit-babbler/ftdi-device.h --- bit-babbler-0.7/include/bit-babbler/ftdi-device.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/ftdi-device.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. 
-// Copyright 2010 - 2016, Ron +// Copyright 2010 - 2018, Ron #ifndef _BB_FTDI_DEVICE_H #define _BB_FTDI_DEVICE_H @@ -331,7 +331,7 @@ if( ret != 2 ) ThrowError( _("FTDI: get modem status returned %d bytes"), ret ); - return (ms[0] << 8) | ms[1]; + return uint16_t( (ms[0] << 8) | ms[1] ); } //}}} @@ -370,7 +370,7 @@ pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate ); int xfer; - int n = std::min( len, m_chunksize ); + int n = int(std::min( len, m_chunksize )); int ret = libusb_bulk_transfer( *m_dh, m_epout, b, n, &xfer, m_timeout ); pthread_setcancelstate( oldstate, NULL ); @@ -384,7 +384,7 @@ if( __builtin_expect(xfer < 0 || size_t(xfer) > len,0) ) ThrowError( _("FTDI: OOPS write of %d returned %d ..."), n, xfer ); - len -= xfer; + len -= unsigned(xfer); b += xfer; break; @@ -409,13 +409,13 @@ int oldstate; int xfer; - int n = std::min( len, m_chunksize ); + int n = int(std::min( len, m_chunksize )); // Ensure we always request a multiple of m_maxpacket, otherwise // we can get an overflow from the last packet that is received, // since the transfer size isn't sent to the device and it might // still send a 'full' packet even if we wanted less than that. - n = round_to_maxpacket(n); + n = int(round_to_maxpacket(size_t(n))); pthread_testcancel(); pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate ); @@ -436,7 +436,7 @@ if( __builtin_expect(xfer < 0 || xfer > n, 0) ) ThrowError( _("FTDI: OOPS read of %d returned %d ..."), n, xfer ); - return xfer; + return size_t(xfer); } ThrowUSBError( ret, _("FTDI: read chunk of %d bytes failed"), n ); @@ -824,7 +824,7 @@ purge_read(); ftdi_set_special_chars(); - ftdi_set_latency_timer( m_latency ); + ftdi_set_latency_timer( uint8_t(m_latency) ); ftdi_set_flow_control( FLOW_RTS_CTS ); ftdi_set_bitmode( BITMODE_RESET ); ftdi_set_bitmode( BITMODE_MPSSE ); diff -Nru bit-babbler-0.7/include/bit-babbler/impl/log.h bit-babbler-0.8/include/bit-babbler/impl/log.h --- bit-babbler-0.7/include/bit-babbler/impl/log.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/impl/log.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2003 - 2017, Ron +// Copyright 2003 - 2018, Ron // // This file provides the implementation detail for bit-babbler/log.h // which must be defined only once in an application. @@ -242,7 +242,7 @@ } //}}} - std::string timeprintf( std::string format, const timeval &tv ) + std::string timeprintf( const char *format, const timeval &tv ) { //{{{ using std::string; @@ -252,53 +252,48 @@ struct tm tm; string::size_type n = 0; - // First substitute microseconds if required. - while( ( n = format.find( "%%", n, 2 ) ) != string::npos ) - { - string::size_type end = format.find( 'u', n ); - - if( end != string::npos ) - { - string::size_type len = end - n; - - char usec[ 7 ]; - snprintf( usec, sizeof( usec ), "%06ld", long(tv.tv_usec) ); - - if( len == 2 ) - { - format.replace( n, len + 1, usec, 6 ); - } - else if( len == 3 ) - { - unsigned int width; - if( sscanf( string( format, n + 2, len - 2 ).c_str(), "%u", &width ) == 1 ) - format.replace( n, len + 1, usec, std::min(width,6u) ); - } - n = end; - } - } - // time_t may not be the same size as tv_sec, and indeed on 64-bit Windows // (and possibly OpenBSD among others) time_t is long long while tv_sec is // only long. - time_t sec = tv.tv_sec; + time_t sec = tv.tv_sec; + // First format a string according to the standard specifiers. 
#if HAVE_LOCALTIME_R - // Then format the rest according to standard specifiers. localtime_r( &sec, &tm ); #else memcpy( &tm, localtime( &sec ), sizeof( tm ) ); #endif + #if EM_PLATFORM_MSW + // We lose the ability to have compile time format checking of strftime + // by doing this but for that function it's of fairly limited value and + // as of gcc-7 we need to disable it here anyway because it doesn't see + // that this function itself is format-checked and so still whines that + // the format is not a literal even though it just checked that it is! + string fmt = format; + // Windows doesn't support the %T format option, so expand it manually here. + while( ( n = fmt.find( "%T", n, 2 ) ) != string::npos ) + fmt.replace( n, 2, "%H:%M:%S" ); + + n = 0; + + // Windows doesn't support the %F format option, so expand it manually here. + while( ( n = fmt.find( "%F", n, 2 ) ) != string::npos ) + fmt.replace( n, 2, "%Y-%m-%d" ); + n = 0; - while( ( n = format.find( "%T", n, 2 ) ) != string::npos ) - format.replace( n, 2, "%H:%M:%S" ); #endif + + // gcc-7 complains about the format parameter passed to strftime, even + // though it already checked it when this function itself was called. + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wformat-nonliteral" + #if EM_PLATFORM_LINUX // Querying the string length with a NULL for the output is a GNU extension. @@ -307,7 +302,7 @@ // It's also tricky to actually test for that, since on systems where it // doesn't work, there is no clear indication except perhaps a segfault. // So we'll be conservative and only use it where we know it really works. - size_t size = strftime( NULL, MAX_LEN, format.c_str(), &tm ); + size_t size = strftime( NULL, MAX_LEN, format, &tm ); #else @@ -319,8 +314,46 @@ { char buf[ size + 1 ]; - if( strftime( buf, size + 1, format.c_str(), &tm ) > 0 ) + #if EM_PLATFORM_MSW + + if( strftime( buf, size + 1, fmt.c_str(), &tm ) > 0 ) timestr = buf; + + #else + + if( strftime( buf, size + 1, format, &tm ) > 0 ) + timestr = buf; + + #endif + } + + #pragma GCC diagnostic pop + + + // Then substitute microseconds if required. + while( ( n = timestr.find( '%', n ) ) != string::npos ) + { + string::size_type end = timestr.find( 'u', n ); + + if( end != string::npos ) + { + string::size_type len = end - n; + + char usec[ 7 ]; + snprintf( usec, sizeof( usec ), "%06ld", long(tv.tv_usec) ); + + if( len == 1 ) + { + timestr.replace( n, len + 1, usec, 6 ); + } + else if( len == 2 ) + { + unsigned int width; + if( sscanf( string( timestr, n + 1, len - 1 ).c_str(), "%u", &width ) == 1 ) + timestr.replace( n, len + 1, usec, std::min(width,6u) ); + } + n = end; + } } return timestr; @@ -362,6 +395,9 @@ #ifdef _REENTRANT + #if ! THREAD_STACK_SIZE + BB_CONST + #endif const pthread_attr_t *GetDefaultThreadAttr() { //{{{ @@ -407,12 +443,17 @@ #if EM_PLATFORM_MSW -void openlog(const char *ident, int option, int facility) +BB_CONST void openlog(const char *ident, int option, int facility) { (void)ident; (void)option; (void)facility; } -void vsyslog(int priority, const char *format, va_list ap) +BB_CONST void syslog(int priority, const char *format, ...) 
+{ + (void)priority; (void)format; +} + +BB_CONST void vsyslog(int priority, const char *format, va_list ap) { (void)priority; (void)format; (void)ap; } diff -Nru bit-babbler-0.7/include/bit-babbler/iniparser.h bit-babbler-0.8/include/bit-babbler/iniparser.h --- bit-babbler-0.7/include/bit-babbler/iniparser.h 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/iniparser.h 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,1695 @@ +//////////////////////////////////////////////////////////////////// +// +//! @file iniparser.h +//! @ingroup INIParsing +//! @brief Parser for INI formatted data. +// +// Copyright 2013 - 2018, Ron +// This file is distributed as part of the bit-babbler package. +// +//////////////////////////////////////////////////////////////////// + +#ifndef _BB_INIPARSER_H +#define _BB_INIPARSER_H + +#include +#include + + +// We don't hash the Section and Option maps by default right now. +// Most of the structures we are expecting to handle at this stage will all be +// relatively small, so the speed benefit is likely to be minimal (or may even +// be non-existant or negative), and the deterministic ordering of a sorted map +// is more user friendly for data that users might see. +// If we ever need to enable this, we should benchmark it, and then possibly +// consider templating the IniData class to allow both. For now, keep it simple. +//#define BB_HASH_INIDATA + +#ifdef BB_HASH_INIDATA + #include +#else + #include +#endif + + +// Enable these if you need low level debug output from IniData. +//#define BB_DEBUG_INIPARSER +//#define BB_DEBUG_INIVALIDATOR + +// Allow this to be overridden so the unit tests can send it to stdout. +#ifndef BB_DEBUG_INI_LOGSTREAM + //! Where to output debug logs. + #define BB_DEBUG_INI_LOGSTREAM stderr +#endif + +//! Debug logging for the parser implementation. +#ifdef BB_DEBUG_INIPARSER + #define debug_inip(...) fprintf(BB_DEBUG_INI_LOGSTREAM, ##__VA_ARGS__); +#else + #define debug_inip(...) +#endif + +//! Debug logging for the validator implementation. +#ifdef BB_DEBUG_INIVALIDATOR + #define debug_iniv(...) fprintf(BB_DEBUG_INI_LOGSTREAM, ##__VA_ARGS__); +#else + #define debug_iniv(...) +#endif + + +namespace BitB +{ + +//! @defgroup INIParsing INI data parsing +//! @brief Support for INI formatted data. +//! @ingroup DataStorage +//!@{ + + //! Parser and container class for INI format structured data. + //{{{ + //! The data encoded in this format consists of @b Sections, @b Options and + //! @b Values. Since there is no standard definition for this format, the + //! rules for this particular parser are defined as follows: + //! + //! A section definition begins on a line where its name is enclosed in + //! square brackets. The section name itself may contain any characters + //! except square brackets, and they have no intrinsic special meaning + //! except as a string identifier. Any characters following the closing + //! square bracket on the same line will simply be ignored. While this may + //! be used to add comments there, doing so should probably be discouraged + //! as a stylistic idiom, and it may be desirable in the future to be more + //! strict and reject any trailing 'junk' following a section header. + //! + //! All following option/value pairs belong to that section until the next + //! section header occurs. Option names may include any characters except + //! whitespace. Leading and trailing whitespace around option names and + //! values is ignored. Internal whitespace in option values is preserved. 
+ //! Options must be defined on a single line, and everything (except leading + //! and trailing whitespace) following the option name up to the end of the + //! line is part of the value. Quote characters (of any sort) have no + //! special meaning to this parser and will be included as a literal part of + //! the value (individual applications however may apply any special meaning + //! to any character in the option name or value that they please). To this + //! parser, both option names and their values are simply literal strings. + //! + //! Comments must appear on their own line, with the first (non-whitespace) + //! character of the line being '#'. + //! + //! For example: + //! @code + //! [my-section] + //! # Option 1 does something important, this is a comment about it. + //! option1 value1 + //! # You may not need option 2. + //! # option2 value2 3 and4 + //! + //! [another-section] + //! # This option has the same name as one above, + //! # but is in a different section. + //! option2 some other value + //! watchout 1 #note: here '#' is part of the value, not a comment. + //! @endcode + //! + //! Section names must be unique. Multiple appearances of the same section + //! name is an error. When multiple sections of the same type are required, + //! a sensible convention to use is the form: + //! + //! @code + //! [name:label] + //! @endcode + //! Where @c name is the type of the section and @c label is a unique identifier + //! of the particular instance being defined. But this is merely a convention, + //! the ':' character has no special significance beyond what an application + //! attributes to particular section names. + //! + //! Option names must be unique within a section. If an option name is used + //! more than once in the same section, then a parsing error will be reported. + //! Option values are optional. An option with no value will be assigned the + //! empty string as its value. + //! + //! As an exception to the normal uniqueness rules above, when additional INI + //! format data is imported with the @c UpdateWith() method, then any sections + //! or options which overlap with already existing data will be merged with it + //! without error, allowing such things as reading multiple files with option + //! settings in the later files overridding any value set previously. This + //! should be used with appropriate caution, since it could also allow typos + //! to 'silently' stomp over some other configuration option accidentally. + //! + //! By default, the above rules are the only constraints applied when parsing + //! the input data, no other limit is placed on what section names will be + //! acceptable, what options they may contain, or what values those may be + //! assigned. Everything the parser reads which is in the correct format will + //! be imported. For most application use, some stronger checking will be + //! desirable, to quickly spot typos in expected section or option names, or + //! names which simply aren't valid for use in the current version. And to + //! sanity check that values are of some expected type or form or range. + //! The @c Validator class provides a simple way to construct introspective + //! checks of the data content to ensure that it is valid in more than just + //! its form as generic INI data. + //}}} + class IniData : public RefCounted + { //{{{ + public: + + //! @name Container types + //@{ //{{{ + + //! Container type for %IniData Options and their values. 
+ #ifdef EG_HASH_INIDATA + typedef std::tr1::unordered_map< std::string, std::string > Options; + #else + typedef std::map< std::string, std::string > Options; + #endif + + //@} //}}} + + //! @name Handle type + //@{ //{{{ + + //! Handle type for a @c %IniData parser instance. + typedef RefPtr< IniData > Handle; + + //@} //}}} + + + //! Container for the options in a single INI section. + class Section : public RefCounted + { //{{{ + private: + + std::string m_name; //!< The name of this section. + Options m_options; //!< The Options it contains. + + + public: + + //! @name Handle type + //@{ //{{{ + + //! Handle type for a @c %Section instance. + typedef RefPtr< Section > Handle; + + //@} //}}} + + //! @name Container types + //@{ //{{{ + + //! Container type for IniData Sections. + #ifdef EG_HASH_INIDATA + typedef std::tr1::unordered_map< std::string, Handle > Map; + #else + typedef std::map< std::string, Handle > Map; + #endif + + //@} //}}} + + + //! @name Constructor + //@{ //{{{ + + //! Construct a new section with the given @a name. + Section( const std::string &name ) + : m_name( name ) + {} + + //@} //}}} + + + //! @name Section construction methods + //@{ //{{{ + + //! Add a new Option to this %Section. + //{{{ + //! This method will not alter any existing data, it will just define + //! an additional new Option in this %Section. + //! + //! @param option The identifier name of the new option. + //! @param value The value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @exception Error will be thrown if the @a option is already defined + //! in this %Section. + //}}} + void AddOption( const std::string &name, + const std::string &value = std::string() ) + { //{{{ + + if( HasOption( name ) ) + throw Error( "Duplicated option '%s' in Section '%s'", + name.c_str(), m_name.c_str() ); + + m_options[name] = value; + + } //}}} + + //! Change the value of an existing Option. + //{{{ + //! The option being updated must already exist in this %Section. + //! + //! @param option The identifier name of the option. + //! @param value The new value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @exception Error will be thrown if the @a option is not already + //! defined in this %Section. + //}}} + void UpdateOption( const std::string &name, + const std::string &value = std::string() ) + { //{{{ + + if( ! HasOption( name ) ) + throw Error( "Option '%s' is not defined in Section '%s'", + name.c_str(), m_name.c_str() ); + + m_options[name] = value; + + } //}}} + + //! %Set or change the value of an Option. + //{{{ + //! The option will be added if it does not already exist, else its value + //! will be updated if it does. + //! + //! @param option The identifier name of the option. + //! @param value The value to set for this option. If not provided, + //! the option's value will be an empty string. + //}}} + void AddOrUpdateOption( const std::string &name, + const std::string &value = std::string() ) + { + m_options[name] = value; + } + + //@} //}}} + + //! @name Removal methods + //@{ //{{{ + + //! Remove an Option from this @c %Section. + //{{{ + //! @param name The identifier name of the option to remove. + // + //! @return @c true if an option with that name existed in this section + //! and was removed. + //}}} + bool RemoveOption( const std::string &name ) + { + return m_options.erase( name ) > 0; + } + + //@} //}}} + + //! @name Accessor methods + //@{ //{{{ + + //! 
Return the name of this %Section. + const std::string &GetName() const + { + return m_name; + } + + //! Return @c true if option @a name is defined in this %Section. + bool HasOption( const std::string &name ) const + { + return m_options.find( name ) != m_options.end(); + } + + //! Return the value of option @a name in this %Section. + //{{{ + //! @exception Error will be thrown if @a name is not defined in + //! this %Section. + //}}} + std::string GetOption( const std::string &name ) const + { //{{{ + + Options::const_iterator i = m_options.find( name ); + + if( i == m_options.end() ) + throw Error( "Section '%s' has no option '%s' defined", + m_name.c_str(), name.c_str() ); + return i->second; + + } //}}} + + //! Query the value of option @a name in this %Section. + //{{{ + //! This method will not throw if the option is not defined in this + //! %Section, it will return the @a default_value provided for it + //! instead. + //! + //! @param name The option that a value is wanted for. + //! @param default_value A value to return if @a name is not + //! defined in this %Section. + //}}} + std::string GetOption( const std::string &name, + const std::string &default_value ) const + { //{{{ + + Options::const_iterator i = m_options.find( name ); + + if( i == m_options.end() ) + return default_value; + + return i->second; + + } //}}} + + //! Return a map of all options in this %Section. + const Options &GetOptions() const + { + return m_options; + } + + //@} //}}} + + //! @name Output methods + //@{ //{{{ + + //! Return an INI encoded string of this %Section and its Options. + //{{{ + //! @note The string returned may not be identical to the one that + //! was originally parsed. The ordering of Options may not + //! be preserved and insignificant whitespace may be different. + //}}} + std::string INIStr() const + { //{{{ + + std::string s( '[' + m_name + "]\n" ); + + for( Options::const_iterator i = m_options.begin(), + e = m_options.end(); i != e; ++i ) + { + if( i->second.empty() ) + s.append( i->first + '\n' ); + else + s.append( i->first + ' ' + i->second + '\n' ); + } + + return s; + + } //}}} + + //@} //}}} + + }; //}}} + + + //! @name Container types + //@{ //{{{ + + //! Container type for %IniData Sections. + typedef Section::Map Sections; + + //@} //}}} + + + //! Generic support for validating INI Sections and Options. + //{{{ + //! This class makes it easy to define the set of allowable %Section names, + //! or patterns of %Section names, and the names of Options and acceptable + //! values for them in each of those sections. + //}}} + class Validator : public RefCounted + { //{{{ + public: + + //! @name Test method signatures + //@{ //{{{ + + //! Signature type for functions used to match Section names. + //{{{ + //! Functions with this signature are passed as the @a method parameter + //! of @c Validator::Section() when creating a %Validator instance. + //! + //! @param expect The string we expect to match against. + //! @param seen The string we are checking for a match. + //! + //! @return @c true if @a seen is a match to what we @a expect, + //! according to whatever criteria the implementation + //! intends to apply. + //! + //! Implementations of this are not usually expected to ever throw. + //}}} + typedef bool(*section_name_test)( const std::string &expect, + const std::string &seen ); + + //! Signature type for functions used to check option values. + //{{{ + //! Functions with this signature are passed as the @a method parameter + //! 
of @c OptionList::AddTest() when creating a %Validator instance. + //! + //! @param option The name of the option being checked. + //! @param value The value it was assigned. + //! + //! @exception Error is expected to be thrown if the value is + //! not acceptable, along with a message suitable + //! for presentation to the end-user explaining why. + //}}} + typedef void(*option_value_test)( const std::string &option, + const std::string &value ); + + //@} //}}} + + + //! @name Section name test methods + //! Standard comparison functions which may be passed as the + //! @ref section_name_test parameter to Validator::Section(). + //! Alternative test methods may also be provided by other + //! application code. + //@{ //{{{ + + //! A @ref section_name_test for section names strictly equal to @a expect. + //{{{ + //! @param expect The string this test expects to match to. + //! @param seen The string we are checking for a match. + //! + //! @return @c true if @a seen equals @a expect. + //}}} + static bool SectionNameEquals( const std::string &expect, const std::string &seen ) + { + return expect == seen; + } + + //! A @ref section_name_test for section names prefixed by @a expect. + //{{{ + //! @param expect The string prefix this test expects to match to. + //! @param seen The string we are checking for a match. + //! + //! @return @c true if @a seen starts with @a expect. + //}}} + static bool SectionNamePrefix( const std::string &expect, const std::string &seen ) + { + return StartsWith( expect, seen ); + } + + //@} //}}} + + //! @name Option value test methods + //! Standard comparison functions which may be passed as the + //! @ref option_value_test parameter to OptionList::AddTest(). + //! Alternative test methods may also be provided by other + //! application code. + //@{ //{{{ + + //! An @ref option_value_test for options which must have some value. + //{{{ + //! @exception Error will be thrown if @a option does not have a + //! non-empty @a value assigned to it. + //}}} + static void OptionWithValue( const std::string &option, const std::string &value ) + { //{{{ + + if( value.empty() ) + throw Error( _("Option '%s' expects a value."), option.c_str() ); + + } //}}} + + //! An @ref option_value_test for options which must not have a value. + //{{{ + //! @exception Error will be thrown if @a option has any non-empty + //! @a value assigned to it. + //}}} + static void OptionWithoutValue( const std::string &option, const std::string &value ) + { //{{{ + + if( ! value.empty() ) + throw Error( _("Option '%s' should not have a value assigned."), + option.c_str() ); + } //}}} + + //! An @ref option_value_test for options which may optionally have a value. + //{{{ + //! Using this test permits validating that the @a option name is acceptable + //! without placing any (initial) constraint upon its @a value during validation. + //! It will accept any value, including an empty one. + //}}} + static void OptionWithAnyValue( const std::string &option, const std::string &value ) + { + (void)option; (void)value; + } + + //@} //}}} + + + //! Container for a list of Option validation checks. + class OptionList : public RefCounted + { //{{{ + private: + + //! @name Container types + //@{ //{{{ + + //! Container type for Option names and the functions to test their values. + #ifdef EG_HASH_INIDATA + typedef std::tr1::unordered_map< std::string, option_value_test > Tests; + #else + typedef std::map< std::string, option_value_test > Tests; + #endif + + //@} //}}} + + + //! 
The map of acceptable Option names to functions for testing their value. + Tests m_tests; + + + public: + + //! @name Handle type + //@{ //{{{ + + //! Handle type for an @c %OptionList instance. + typedef RefPtr< OptionList > Handle; + + //@} //}}} + + + //! @name Constructors + //@{ //{{{ + + //! Create a new, empty, %OptionList. + OptionList() {} + + //! Create a new %OptionList initialised with a single option and its test. + //{{{ + //! @param option_name The option to recognise and apply this test to. + //! @param method The @ref option_value_test used to determine if + //! the value assigned to this option is valid. + //! + //! This is equivalent to: + //! @code + //! OptionList().AddTest( option_name, method ); + //! @endcode + //}}} + OptionList( const std::string &option_name, option_value_test method ) + { + m_tests[option_name] = method; + } + + //@} //}}} + + + //! @name Initialiser methods + //@{ //{{{ + + //! Add (or alter) a test for some valid option name. + //{{{ + //! If there was a previously existing test for @a option_name, it will + //! silently be replaced. + //! + //! @param option_name The option to recognise and apply this test to. + //! @param method The @ref option_value_test used to determine if + //! the value assigned to this option is valid. + //! + //! @return A pointer to this @c %OptionList, so that multiple calls to + //! this method may be chained together when defining all the + //! valid options allowed in some section. + //}}} + OptionList *AddTest( const std::string &option_name, option_value_test method ) + { + m_tests[option_name] = method; + return this; + } + + //@} //}}} + + //! @name Validation test methods + //@{ //{{{ + + //! Test that an option name and its value are valid. + //{{{ + //! @param name The name of the option to validate. + //! @param value The value it was assigned. + //! + //! @exception Error will be thrown if the option name is unknown + //! or if the value is not acceptable according to the + //! criteria of the test which was specified for it. + //}}} + void CheckOption( const std::string &name, const std::string &value ) const + { //{{{ + + debug_iniv( " Check option '%s', value '%s'\n", + name.c_str(), value.c_str() ); + + Tests::const_iterator i = m_tests.find( name ); + + if( i == m_tests.end() ) + throw Error( "Unknown option '%s'", name.c_str() ); + + debug_iniv( " Validating '%s'\n", name.c_str() ); + i->second( name, value ); + + } //}}} + + //@} //}}} + + }; //}}} + + + private: + + //! Container for the validation checks to be performed for some section. + class SectionCheck + { //{{{ + public: + + //! @name Container types + //@{ //{{{ + + //! Container for the list of Section checks used by a @c Validator. + typedef std::list< SectionCheck > List; + + //@} //}}} + + + private: + + //! The name of the Section(s) this should check. + std::string m_name; + + //! How to compare m_name for a match to the actual section name. + section_name_test m_test; + + //! The list of Option validation tests for matching Sections. + OptionList::Handle m_options; + + + public: + + //! @name Constructors + //@{ //{{{ + + //! Create a validation check for some INI Section. + //{{{ + //! @param name The string used to check if this set of tests + //! are to be applied to a particular %Section. + //! @param nametest The method used to compare @a name to the + //! %Section identifier to see if these checks + //! are to be applied. It may test for a perfect + //! match, or a prefix match, or use any other + //! 
criteria appropriate to the application. + //! @param options A list of the validation tests to be applied + //! to each of the Options in a matching Section. + //}}} + SectionCheck( const std::string &name, + section_name_test test, + const OptionList::Handle &options ) + : m_name( name ) + , m_test( test ) + , m_options( options ) + {} + + //@} //}}} + + + //! @name Accessors + //@{ //{{{ + + //! Test a Section against these validation criteria. + //{{{ + //! @param s The Section to validate. + //! + //! @return @c false if the Section identifier does not match this + //! set of tests according to the @c section_name_test being + //! used and the reference string it is being compared to. + //! @c true if the Section identifier was a match and all + //! Options it contained validated successfully. + //! + //! @exception Error will be thrown if this set of tests were applied + //! to the Section, but any Option defined in it failed + //! validation. + //}}} + bool CheckSection( const Section::Handle &s ) const + { //{{{ + + // Is this the section we are looking for? + if( ! m_test( m_name, s->GetName() ) ) + return false; + + debug_iniv( "Checking [%s] with '%s' validator\n", + s->GetName().c_str(), m_name.c_str() ); + + + // If so, are its options all valid? + const Options &opts = s->GetOptions(); + + try { + for( Options::const_iterator i = opts.begin(), + e = opts.end(); i != e; ++i ) + m_options->CheckOption( i->first, i->second ); + } + catch( const std::exception &e ) + { + throw Error( _("Section [%s]: %s"), + s->GetName().c_str(), e.what() ); + } + + debug_iniv( " Check [%s] passed\n", s->GetName().c_str() ); + return true; + + } //}}} + + //! Return the string we are testing section names against. + const std::string &TestStr() const { return m_name; } + + //@} //}}} + + }; //}}} + + + //! The list of Section validation checks to apply. + SectionCheck::List m_sections; + + + public: + + //! @name Handle type + //@{ //{{{ + + //! Handle type for a @c %Validator instance. + typedef RefPtr< Validator > Handle; + + //@} //}}} + + + //! @name Constructors + //@{ //{{{ + + //! Create a new %Validator instance. + Validator() {} + + //@} //}}} + + + //! @name Initialiser methods + //@{ //{{{ + + //! Add tests to validate Section names and the Options they may contain. + //{{{ + //! @param name The string used to check if this set of tests + //! are to be applied to a particular %Section. + //! @param method The @ref section_name_test used to compare @a name + //! to the %Section identifier to see if these checks + //! are to be applied. It may test for a perfect + //! match, or a prefix match, or use any other + //! criteria appropriate to the application. + //! @param options A list of the validation tests to be applied + //! to each of the Options in a matching Section. + //}}} + void Section( const std::string &name, + section_name_test method, + const OptionList::Handle &options ) + { + m_sections.push_back( SectionCheck( name, method, options ) ); + } + + //@} //}}} + + //! @name Validation test methods + //@{ //{{{ + + //! Test INI @a data against the constraints of this %Validator + //{{{ + //! @exception Error will be thrown if validation fails. 
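    // A hypothetical usage sketch of the classes defined in this header (the
    // section and option names below are invented, they are not the ones that
    // seedd itself registers): describe the acceptable shape of the data with
    // a Validator, parse a string into IniData, then apply the Validator.

    #include <string>
    #include <bit-babbler/iniparser.h>

    static void example_check_config( const std::string &text )
    {
        using namespace BitB;

        IniData::Validator::Handle v = new IniData::Validator;

        // A [general] section may set 'enabled' as a bare flag, and 'rate',
        // which must be given a value.
        v->Section( "general", IniData::Validator::SectionNameEquals,
                    ( new IniData::Validator::OptionList( "enabled",
                            IniData::Validator::OptionWithoutValue ) )
                        ->AddTest( "rate", IniData::Validator::OptionWithValue ) );

        // Any number of [source:<label>] sections, each needing a 'path' value.
        v->Section( "source:", IniData::Validator::SectionNamePrefix,
                    new IniData::Validator::OptionList( "path",
                            IniData::Validator::OptionWithValue ) );

        IniData::Handle ini = new IniData( text );  // throws if the INI form is bad
        v->Validate( ini );                         // throws if the content is bad
    }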
+ //}}} + void Validate( const IniData::Handle &data ) const + { //{{{ + + const Sections &s = data->GetSections(); + + debug_iniv( "Validating %zu INI sections\n", s.size() ); + + for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) + { + debug_iniv( "Validate [%s]\n", i->first.c_str() ); + + for( SectionCheck::List::const_iterator ci = m_sections.begin(), + ce = m_sections.end(); ci != ce; ++ci ) + { + if( ci->CheckSection( i->second ) ) + goto check_next_section; + + debug_iniv( " not matched to '%s' validator\n", ci->TestStr().c_str() ); + } + + throw Error( "Unknown section [%s]", i->first.c_str() ); + + check_next_section: + ; + } + + debug_iniv( "Validated %zu INI sections.\n", s.size() ); + + } //}}} + + //! Test INI @a data against the constraints of this %Validator + //{{{ + //! This method will never throw, if there is an error, then a + //! description of it will be returned in the @a error parameter. + //! + //! If no error occurs, then the content of @a error will be untouched. + //! Sane users will generally want to ensure it is empty before this + //! method is called, but that is not a hard requirement. If an error + //! occurs the content of @a error will be replaced, not appended to. + //! + //! @return @c true if there was no error validating the @a data. + //}}} + bool Validate( const IniData::Handle &data, std::string &error ) const + { //{{{ + + try { + Validate( data ); + return true; + } + catch( const abi::__forced_unwind& ) { throw; } + catch( const std::exception &e ) { error = e.what(); } + catch( ... ) { error = "Unknown exception"; } + + return false; + + } //}}} + + //@} //}}} + + }; //}}} + + + private: + + //! All sections mapped by name. + Sections m_sections; + + + //! @name INI structure parsing + //@{ //{{{ + + //! Return the position of the first non-whitespace byte in @a data after @a pos. + //{{{ + //! If there is no character that is not insignificant whitespace after + //! @a pos, then @c std::string::npos will be returned. + //}}} + static size_t skip_whitespace( const std::string &data, size_t pos ) + { //{{{ + + return data.find_first_not_of(" \t\n\r", pos); + + } //}}} + + //! Return the next line of @a data beginning at @a pos. + static std::string get_next_line( const std::string &data, size_t &pos ) + { //{{{ + + // Trim off any leading whitespace. + size_t b = skip_whitespace( data, pos ); + + if( b != std::string::npos ) + { + // Find the next line break. + size_t e = data.find_first_of( "\n\r", b ); + + // Advance the read pointer to the non-whitespace + // character which will start the following line. + pos = skip_whitespace( data, e ); + + //debug_inip( "b = %zu, e = %zu, pos = %zu\n", b, e, pos ); + + return data.substr( b, e - b ); + } + + pos = b; + return std::string(); + + } //}}} + + + //! Parse a line of text containing a Section header. + Section::Handle parse_section( const std::string &s, bool allow_duplicates ) + { //{{{ + + using std::string; + + debug_inip( "begin section: '%s'\n", s.c_str() ); + + size_t n = s.find_first_of(']'); + + if( n == string::npos || s.size() < 3 ) + throw Error( "Invalid section '%s'", s.c_str() ); + + string name = s.substr( 1, n - 1 ); + + debug_inip( "section name: '%s'\n", name.c_str() ); + + if( allow_duplicates ) + return AddOrGetSection( name ); + + return AddSection( name ); + + } //}}} + + //! Parse a line of text containing an option for @a section. 
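For readers trying to follow how the Validator plumbing above fits together, here is a minimal usage sketch (not taken from the package itself). It assumes the nesting the surrounding hunks suggest (Validator and OptionList live inside IniData), that the header path is bit-babbler/iniparser.h, and that option_value_test and section_name_test are plain function types callable as test(name, value) (throwing Error on a bad value) and test(pattern, section_name) (returning bool), matching the call sites in CheckOption() and CheckSection() above.

    #include <bit-babbler/iniparser.h>
    #include <string>

    using namespace BitB;

    // Hypothetical option test: reject empty values by throwing, which is
    // how the CheckOption() documentation expects a failing test to signal.
    static void require_value( const std::string &name, const std::string &value )
    {
        if( value.empty() )
            throw Error( "option '%s' needs a value", name.c_str() );
    }

    // Hypothetical section name tests: an exact match, and the "prefix
    // match" case the documentation mentions (using StartsWith from log.h).
    static bool exact_name( const std::string &want, const std::string &have )
    {
        return want == have;
    }

    static bool prefix_name( const std::string &want, const std::string &have )
    {
        return StartsWith( want, have );
    }

    static bool validate_config( const IniData::Handle &data, std::string &err )
    {
        IniData::Validator::Handle v = new IniData::Validator;

        // One section named exactly "Service", with two known options.
        v->Section( "Service", exact_name,
                    ( new IniData::Validator::OptionList( "socket", require_value ) )
                        ->AddTest( "verbose", require_value ) );

        // One rule applied to every section whose name starts with "Device:".
        v->Section( "Device:", prefix_name,
                    new IniData::Validator::OptionList( "rate", require_value ) );

        return v->Validate( data, err );   // false, with a message in err, on failure
    }

The chained form relies on AddTest() returning the raw OptionList pointer, and on the Handle type accepting a raw pointer implicitly in the same way m_sections[name] = new Section(name) does further down in this header.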
+ void parse_option( const Section::Handle §ion, + const std::string &s, + bool allow_duplicates ) + { //{{{ + + using std::string; + + // Caller already stripped leading whitespace from the line. + size_t n1 = s.find_first_of(" \t"); // Find end of option + size_t n2 = skip_whitespace( s, n1 ); // Find start of value + size_t n3 = s.find_last_not_of(" \t\n\r" ); // Strip trailing whitespace + + debug_inip( "scan option: '%s' -- n1 %zu, n2 %zu, n3 %zu\n", + s.c_str(), n1, n2, n3); + + string opt = s.substr( 0, n1 ); + string val = (n2 != string::npos) ? s.substr( n2, n3 - (n2 - 1) ) : string(); + + debug_inip( "have option: '%s', value: '%s'\n", opt.c_str(), val.c_str() ); + + if( allow_duplicates ) + section->AddOrUpdateOption( opt, val ); + else + section->AddOption( opt, val ); + + } //}}} + + //! Parse a block of INI formatted data. + void parse( const std::string &data, bool allow_duplicates = false ) + { //{{{ + + using std::string; + + Section::Handle current_section; + size_t pos = 0; + + while( pos != string::npos ) + { + //debug_inip( "pos = %zu\n", pos ); + + string s = get_next_line( data, pos ); + + if( s.empty() ) + continue; + + switch( s[0] ) + { + case '#': + debug_inip( "skipping comment: '%s'\n", s.c_str() ); + break; + + case '[': + current_section = parse_section( s, allow_duplicates ); + break; + + default: + parse_option( current_section, s, allow_duplicates ); + } + } + + } //}}} + + //@} //}}} + + + public: + + //! @name Constructors + //@{ //{{{ + + //! Default constructor for a new empty parser. + IniData() {} + + //! Construct a new instance, parsing a block of INI @a data from a string. + //{{{ + //! @exception Various exceptions may be thrown if the @ data string + //! is not a valid INI structure. + //}}} + IniData( const std::string &data ) + { + parse( data ); + } + + //! Construct a new instance, parsing a block of INI @a data from a string. + //{{{ + //! This constructor will never throw. Instead, if there is an error, + //! then a description of it will be returned in the @a error parameter. + //! If an error is returned, then the content of this parser instance + //! is undefined and no attempt should be made to access it. + //! + //! If no error occurs, then the content of @a error will be untouched. + //! Sane users will generally want to ensure it is empty before this + //! method is called, but that is not a hard requirement. If an error + //! occurs the content of @a error will be replaced, not appended to. + //}}} + IniData( const std::string &data, std::string &error ) + { + Decode( data, error ); + } + + //@} //}}} + + + //! @name Generic container operations + //@{ //{{{ + + //! Erases all data currently held in this parser instance. + void clear() { m_sections.clear(); } + + //! Return @c true if this parser contains no data. + //{{{ + //! It will return @c false if it contains any sections, even if they + //! have no options defined in them. + //}}} + bool empty() const { return m_sections.empty(); } + + //@} //}}} + + //! @name Initialiser methods + //@{ //{{{ + + //! Decode a new block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will be replaced by + //! the new data. + //! + //! @exception Various exceptions may be thrown if the @a data string + //! is not a valid INI structure. + //! + //! If this method throws an exception, then the parser should be + //! considered to be in an indefinite state (at present, the options + //! 
which were successfully parsed prior to the error will be included + //! in it, while any following options will not - but applications + //! should not rely on that behaviour in any way as it is strictly an + //! implementation detail which could change without warning in some + //! future revision). + //}}} + void Decode( const std::string &data ) + { //{{{ + + clear(); + parse( data ); + + } //}}} + + //! Decode a new block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will be replaced by + //! the new data. + //! + //! This method will never throw. Instead, if there is an error, then + //! a description of it will be returned in the @a error parameter. + //! If an error is returned, then the content of this parser instance + //! is undefined and no attempt should be made to access it. + //! + //! If no error occurs, then the content of @a error will be untouched. + //! Sane users will generally want to ensure it is empty before this + //! method is called, but that is not a hard requirement. If an error + //! occurs the content of @a error will be replaced, not appended to. + //! + //! @return @c true if there was no error parsing the @a data. + //}}} + bool Decode( const std::string &data, std::string &error ) + { //{{{ + + try { + Decode( data ); + return true; + } + catch( const abi::__forced_unwind& ) { throw; } + catch( const std::exception &e ) { error = e.what(); } + catch( ... ) { error = "Unknown exception"; } + + return false; + + } //}}} + + + //! Decode a(nother) block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will @b not be replaced + //! by the new data, it will simply be added to it in the same way as if + //! it has been appended to any existing data when that was parsed. This + //! means that any duplicate %Section names declared in this new @a data + //! will be considered an error. + //! + //! @exception Various exceptions may be thrown if the @a data string + //! is not a valid INI structure. + //! + //! If this method throws an exception, then the parser should be + //! considered to be in an indefinite state (at present, the options + //! which were successfully parsed prior to the error will be included + //! in it, while any following options will not - but applications + //! should not rely on that behaviour in any way as it is strictly an + //! implementation detail which could change without warning in some + //! future revision). + //}}} + void DecodeMore( const std::string &data ) + { + parse( data ); + } + + //! Decode a(nother) block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will @b not be replaced + //! by the new data, it will simply be added to it in the same way as if + //! it has been appended to any existing data when that was parsed. This + //! means that any duplicate %Section names declared in this new @a data + //! will be considered an error. + //! + //! This method will never throw. Instead, if there is an error, then + //! a description of it will be returned in the @a error parameter. + //! If an error is returned, then the content of this parser instance + //! is undefined and no attempt should be made to access it. + //! + //! If no error occurs, then the content of @a error will be untouched. + //! Sane users will generally want to ensure it is empty before this + //! method is called, but that is not a hard requirement. If an error + //! occurs the content of @a error will be replaced, not appended to. 
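A short sketch of the non-throwing parse path described above. Only IniData, Decode(), empty() and the header path are taken from this hunk; the function name and the INI text are illustrative.

    #include <bit-babbler/iniparser.h>
    #include <cstdio>
    #include <string>

    using namespace BitB;

    static bool load_config( const std::string &text )
    {
        IniData     ini;
        std::string err;

        if( ! ini.Decode( text, err ) )
        {
            // err describes the parse failure; the parser content is
            // undefined now, so don't read anything back out of it.
            fprintf( stderr, "config error: %s\n", err.c_str() );
            return false;
        }

        return ! ini.empty();
    }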
+ //! + //! @return @c true if there was no error parsing the @a data. + //}}} + bool DecodeMore( const std::string &data, std::string &error ) + { //{{{ + + try { + parse( data ); + return true; + } + catch( const abi::__forced_unwind& ) { throw; } + catch( const std::exception &e ) { error = e.what(); } + catch( ... ) { error = "Unknown exception"; } + + return false; + + } //}}} + + + //! Update the existing options with a block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will be appended to or + //! updated by the new data. It is not an error for it to contain + //! %Sections and %Options which have been already defined, the new values + //! will simply replace any old ones which already existed, and add any + //! which previously did not. + //! + //! @exception Various exceptions may be thrown if the @a data string + //! is not a valid INI structure. + //! + //! If this method throws an exception, then the parser should be + //! considered to be in an indefinite state (at present, the options + //! which were successfully parsed prior to the error will be included + //! in it, while any following options will not - but applications + //! should not rely on that behaviour in any way as it is strictly an + //! implementation detail which could change without warning in some + //! future revision). + //}}} + void UpdateWith( const std::string &data ) + { + parse( data, true ); + } + + //! Update the existing options with a block of INI @a data from a string. + //{{{ + //! The existing content of this parser, if any, will be appended to or + //! updated by the new data. It is not an error for it to contain + //! %Sections and %Options which have been already defined, the new values + //! will simply replace any old ones which already existed, and add any + //! which previously did not. + //! + //! This method will never throw. Instead, if there is an error, then + //! a description of it will be returned in the @a error parameter. + //! If an error is returned, then the content of this parser instance + //! is undefined and no attempt should be made to access it. + //! + //! If no error occurs, then the content of @a error will be untouched. + //! Sane users will generally want to ensure it is empty before this + //! method is called, but that is not a hard requirement. If an error + //! occurs the content of @a error will be replaced, not appended to. + //! + //! @return @c true if there was no error parsing the @a data. + //}}} + bool UpdateWith( const std::string &data, std::string &error ) + { //{{{ + + try { + parse( data, true ); + return true; + } + catch( const abi::__forced_unwind& ) { throw; } + catch( const std::exception &e ) { error = e.what(); } + catch( ... ) { error = "Unknown exception"; } + + return false; + + } //}}} + + + //! Add a new @c %Section. + //{{{ + //! This method will not alter any existing data, it will just create + //! an additional new @c Section. + //! + //! @param name The identifier for the new section. + //! + //! @return A handle to the newly created section. + //! + //! @exception Error will be thrown if @a name is already definied as + //! an existing section. + //}}} + Section::Handle AddSection( const std::string &name ) + { //{{{ + + if( m_sections.find( name ) != m_sections.end() ) + throw Error( "Duplicated section [%s]", name.c_str() ); + + m_sections[ name ] = new Section( name ); + + return m_sections[name]; + + } //}}} + + + //! Add a new Option to a @c %Section. + //{{{ + //! 
This method will not alter any existing data, it will just define + //! an additional new Option in some @c Section. + //! + //! @param section A handle to the section that the option is to be + //! added to. + //! @param option The identifier name of the new option. + //! @param value The value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //! + //! @exception Error will be thrown if the @a option is already defined + //! in this @a section. + //}}} + void AddOption( const Section::Handle §ion, + const std::string &option, + const std::string &value = std::string() ) + { + section->AddOption( option, value ); + } + + //! Add a new Option to a named @c %Section. + //{{{ + //! This method will not alter any existing data, it will just define + //! an additional new Option in some @c Section. + //! + //! @param section The name of the section that the option is to be + //! added to. + //! @param option The identifier name of the new option. + //! @param value The value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @exception Error will be thrown if the @a option is already defined + //! in this @a section, or if the section does not already + //! exist. + //}}} + void AddOption( const std::string §ion, + const std::string &option, + const std::string &value = std::string() ) + { + GetSection( section )->AddOption( option, value ); + } + + //! Change the value of an existing Option. + //{{{ + //! The option being updated must already exist in the given @a section. + //! + //! @param section A handle to the section where the option is found. + //! @param option The identifier name of the option. + //! @param value The new value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //! + //! @exception Error will be thrown if the @a option is not already + //! defined in this @a section. + //}}} + void UpdateOption( const Section::Handle §ion, + const std::string &option, + const std::string &value = std::string() ) + { + section->UpdateOption( option, value ); + } + + //! Change the value of an existing Option. + //{{{ + //! The option being updated, and the @c Section it is contained in, + //! must already exist. + //! + //! @param section The name of the section that the option is to be + //! added to. + //! @param option The identifier name of the option. + //! @param value The new value to set for this option. If not provided, + //! the option's value will be an empty string. + //! + //! @exception Error will be thrown if the @a option is not already + //! defined in this @a section, or if the section does not + //! already exist. + //}}} + void UpdateOption( const std::string §ion, + const std::string &option, + const std::string &value = std::string() ) + { + GetSection( section )->UpdateOption( option, value ); + } + + //! %Set or change the value of an Option. + //{{{ + //! The option will be added if it does not already exist, else its value + //! will be updated if it does. + //! + //! @param section A handle to the section where the option is found. + //! @param option The identifier name of the option. + //! @param value The value to set for this option. If not provided, + //! 
the option's value will be an empty string. + //! + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //}}} + void AddOrUpdateOption( const Section::Handle §ion, + const std::string &option, + const std::string &value = std::string() ) + { + section->AddOrUpdateOption( option, value ); + } + + //! %Set or change the value of an Option. + //{{{ + //! The option will be added if it does not already exist, else its value + //! will be updated if it does. If the section does not already exist, + //! then it will be created too. + //! + //! @param section The name of the section that the option is to be + //! added to. + //! @param option The identifier name of the option. + //! @param value The value to set for this option. If not provided, + //! the option's value will be an empty string. + //}}} + void AddOrUpdateOption( const std::string §ion, + const std::string &option, + const std::string &value = std::string() ) + { + AddOrGetSection( section )->AddOrUpdateOption( option, value ); + } + + //@} //}}} + + //! @name Removal methods + //@{ //{{{ + + //! Remove a @c %Section. + //{{{ + //! @param name The identifier of the section to remove. + //! + //! @return @c true if a section with that name existed and was removed. + //}}} + bool RemoveSection( const std::string &name ) + { + return m_sections.erase( name ) > 0; + } + + //! Remove an Option from a @c %Section. + //{{{ + //! @param section A handle to the section that the option is to be + //! removed from. + //! @param option The identifier name of the option to remove. + //! + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //! + //! @return @c true if an option with that name existed in that section + //! and was removed. + //}}} + bool RemoveOption( const Section::Handle §ion, const std::string &option ) + { + return section->RemoveOption( option ); + } + + //! Remove an Option from a @c %Section. + //{{{ + //! @param section The name of the section that the option is to be + //! removed from. + //! @param option The identifier name of the option to remove. + //! + //! @return @c true if an option with that name existed in a section + //! with that name and was removed. + //}}} + bool RemoveOption( const std::string §ion, const std::string &option ) + { //{{{ + + Sections::const_iterator i = m_sections.find( section ); + + if( i == m_sections.end() ) + return false; + + return i->second->RemoveOption( option ); + + } //}}} + + //@} //}}} + + //! @name Accessor methods + //@{ //{{{ + + //! Return a map of all sections. + const Sections &GetSections() const + { + return m_sections; + } + + //! Return a map of all sections with names matching the given @a prefix. + //{{{ + //! The keys of the returned map are the trailing portions of the matching + //! section names not including the prefix. To get the full section name + //! you can call the @c Section::GetName() method (if the @a prefix is no + //! longer available to prepend to the key). + //}}} + Sections GetSections( const std::string &prefix ) const + { //{{{ + + Sections s; + size_t n = prefix.size(); + + for( Sections::const_iterator i = m_sections.begin(), + e = m_sections.end(); i != e; ++i ) + { + if( StartsWith( prefix, i->first ) ) + s[i->first.substr(n)] = i->second; + } + + return s; + + } //}}} + + + //! Return @c true if %Section @a name is defined. 
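The add, update and remove calls above combine roughly as follows. The section and option names here are purely illustrative, and the header path is assumed from the diff paths.

    #include <bit-babbler/iniparser.h>
    #include <string>

    using namespace BitB;

    static void edit_config( IniData &ini )
    {
        // By-name AddOrUpdateOption creates the section if needed, then
        // adds the option or replaces its existing value.
        ini.AddOrUpdateOption( "Service",      "verbose", "1" );
        ini.AddOrUpdateOption( "Device:alpha", "rate",    "2500k" );

        // The stricter forms: AddOption throws if the option already
        // exists, UpdateOption throws if it does not.
        ini.AddOption( "Service", "daemon" );            // value defaults to ""
        ini.UpdateOption( "Service", "verbose", "2" );

        // Removal never throws, it just reports whether anything was removed.
        ini.RemoveOption( "Device:alpha", "rate" );
        ini.RemoveSection( "Device:alpha" );
    }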
+ bool HasSection( const std::string &name ) const + { + return m_sections.find( name ) != m_sections.end(); + } + + //! Return a handle to %Section @a name. + //{{{ + //! @param name The identifier of the requested Section. + //! + //! @exception Error will be thrown if the %Section is not defined. + //}}} + Section::Handle GetSection( const std::string &name ) const + { //{{{ + + Sections::const_iterator i = m_sections.find( name ); + + if( i == m_sections.end() ) + throw Error( "Section [%s] is not defined", name.c_str() ); + + return i->second; + + } //}}} + + //! Return a handle to %Section @a name. + //{{{ + //! If the @c Section was not already defined, it will be created and a + //! handle to the new empty @c %Section structure will be returned. + //! + //! @param name The identifier of the requested Section. + //}}} + Section::Handle AddOrGetSection( const std::string &name ) + { //{{{ + + if( ! HasSection( name ) ) + return AddSection( name ); + + return m_sections[name]; + + } //}}} + + + //! Return a map of all options defined in @a section. + //{{{ + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //}}} + const Options &GetOptions( const Section::Handle §ion ) const + { + return section->GetOptions(); + } + + //! Return a map of all options defined in the named @a section. + //{{{ + //! @exception Error will be thrown if @a section is not defined. + //}}} + const Options &GetOptions( const std::string §ion ) const + { + return GetSection( section )->GetOptions(); + } + + + //! Return @c true if @a option is defined in @a section. + //{{{ + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //}}} + bool HasOption( const Section::Handle §ion, const std::string &option ) const + { + return section->HasOption( option ); + } + + //! Return @c true if @a option is defined in the named @a section. + //{{{ + //! This will return @c false if either the section itself is not defined, + //! or if the option is not defined within it. + //}}} + bool HasOption( const std::string §ion, const std::string &option ) const + { //{{{ + + Sections::const_iterator i = m_sections.find( section ); + + if( i == m_sections.end() ) + return false; + + return i->second->HasOption( option ); + + } //}}} + + + //! Return the value of @a option defined in @a section. + //{{{ + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //! + //! @exception Error will be thrown if @a option is not defined in + //! the given @a section. + //}}} + std::string GetOption( const Section::Handle §ion, + const std::string &option ) const + { + return section->GetOption( option ); + } + + //! Return the value of @a option defined in the named @a section. + //{{{ + //! @exception Error will be thrown if @a option is not defined in + //! the given @a section, or if the section itself is not + //! defined. + //}}} + std::string GetOption( const std::string §ion, + const std::string &option ) const + { + return GetSection( section )->GetOption( option ); + } + + //! Query the value of @a option in a @a section. + //{{{ + //! This method will not throw if the option is not defined in the + //! given section, it will return the @a default_value provided for + //! it instead. + //! + //! @param section A handle to the selected Section. + //! @param option The option that a value is wanted for. + //! 
@param default_value A value to return if @a option is not + //! defined in the @a section. + //! + //! @note It is the caller's responsibility to ensure that @a section + //! is a valid handle to an existing %Section. + //}}} + std::string GetOption( const Section::Handle §ion, + const std::string &option, + const std::string &default_value ) const + { + return section->GetOption( option, default_value ); + } + + //! Query the value of @a option in the named @a section. + //{{{ + //! This method will not throw if the option is not defined in the + //! given section, or if the section itself is not defined. It will + //! return the @a default_value provided for it instead. + //! + //! @param section The name of the selected Section. + //! @param option The option that a value is wanted for. + //! @param default_value A value to return if @a option is not + //! defined in the @a section. + //}}} + std::string GetOption( const std::string §ion, + const std::string &option, + const std::string &default_value ) const + { //{{{ + + Sections::const_iterator i = m_sections.find( section ); + + if( i == m_sections.end() ) + return default_value; + + return i->second->GetOption( option, default_value ); + + } //}}} + + //@} //}}} + + //! @name Output methods + //@{ //{{{ + + //! Return an INI encoded string of the structures in this parser + //{{{ + //! @note The string returned may not be identical to the one that + //! was originally parsed. The ordering of Sections and + //! Options may not be preserved and insignificant whitespace + //! may be different. + //}}} + std::string INIStr() const + { //{{{ + + std::string s; + + for( Sections::const_iterator i = m_sections.begin(), + e = m_sections.end(); i != e; ++i ) + s.append( i->second->INIStr() + '\n' ); + + return s; + + } //}}} + + //@} //}}} + + }; //}}} + +//!@} + +} // BitB namespace + + +// Don't let these leak outside this file, nobody else should use them, and +// we avoid a possible conflict with some exernal dependency included later. +#undef debug_inip +#undef debug_iniv + +#endif // _BB_INIPARSER_H + +// vi:sts=4:sw=4:et:foldmethod=marker diff -Nru bit-babbler-0.7/include/bit-babbler/json.h bit-babbler-0.8/include/bit-babbler/json.h --- bit-babbler-0.7/include/bit-babbler/json.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/json.h 2018-02-07 23:56:52.000000000 +0000 @@ -4,7 +4,7 @@ //! @ingroup JsonParsing //! @brief Parser for JSON formatted data. // -// Copyright 2013 - 2017, Ron +// Copyright 2013 - 2018, Ron // This file is distributed as part of the bit-babbler package. // //////////////////////////////////////////////////////////////////// @@ -377,6 +377,51 @@ //@} //}}} + //! @name Comparison operators + //@{ //{{{ + + //! Return @c true if there is an object to dereference. + //{{{ + //! Because this specialised @c RefPtr has an implicit conversion + //! to @c double operator, we can't just use the normal idiom for + //! testing if it actually contains anything. + //! + //! For example: + //! @code + //! // This is what we'd usually do, and clearly says what it means, + //! // but ISO C++ says these two possible resolutions are ambiguous: + //! // RefPtr::operator!=(T*) + //! // operator!=(double, long int) + //! // Thanks to the historically silly definition of NULL in C++. + //! if( handle != NULL ) + //! ... + //! + //! // This resolves that, but eww ... + //! if( handle != static_cast< HandleType >(NULL) ) + //! ... + //! + //! 
// This also works, but the double negation trick isn't usually + //! // something it's nice to scatter through user-facing code. + //! if( !!handle ) + //! ... + //! + //! // So we provide this easy-to-read-at-a-glance option instead. + //! if( handle.IsNotNULL() ) + //! ... + //! @endcode + //! + //! So the only trick here now, is to not confuse this functionality + //! with the operation of the @c Data::IsNull accessor for the JSON + //! @c null literal primitive type. Note the distinction between + //! @c NULL, and @c Null or @c null. + //}}} + bool IsNotNULL() const + { + return RefPtr::Raw() != NULL; + } + + //@} //}}} + }; //}}} @@ -673,6 +718,40 @@ return String(); } + + //! Return the value of a numeric primitive as type @a T + //{{{ + //! RFC 7159 recommends that for interoperability an implementation + //! should expect numeric primitives to have the precision and range + //! of an IEEE 754 @c double precision floating point type. And JSON + //! itself makes no distinction between integer and floating point + //! numeric values, to it they are all just the same primitive type. + //! + //! However, in any real use, it is likely that values which are strictly + //! always integers will be encoded and decoded. This method can be used + //! for safe conversion of a JSON numeric primitive to any other numeric + //! type which the software calling it requires. A compile time error + //! will occur if the @c double type cannot be @c static_cast to type @a T, + //! and a runtime exception will be thrown if this is not a JSON numeric + //! primitive. + //! + //! @tparam T The desired numeric type. + //! + //! @note This method hides the normal @c RefCounted::As dynamic cast + //! operator, since it has the same semantics and the narrowed + //! scope of only applying this operation to numeric primitives + //! is appropriate here. The base class method can still be + //! called explicitly if needed by some specialised case though. + //! + //! @exception Error will be thrown if there is no data or this is not a + //! numeric primitive type + //}}} + template< typename T > + T As() const + { + return static_cast( Number() ); + } + //@} //}}} //! @name Object accessors @@ -1199,6 +1278,7 @@ // by the configure test) that they'll need to run this using the "C" // locale if their default locale treats decimal numbers differently // to that. + (void)clocale; m_value = strtod( b, &e ); #endif @@ -1931,6 +2011,10 @@ pos, data.c_str() ); pos = b; + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpragmas" + #pragma GCC diagnostic ignored "-Wgnu-case-range" + switch( data[pos] ) { case '{': @@ -1955,6 +2039,7 @@ throw Error( "Invalid JSON value at position %zu in '%s'", pos, data.c_str() ); } + #pragma GCC diagnostic pop } //}}} @@ -2438,6 +2523,40 @@ return String(); } + + //! Return the value of a numeric primitive as type @a T + //{{{ + //! RFC 7159 recommends that for interoperability an implementation + //! should expect numeric primitives to have the precision and range + //! of an IEEE 754 @c double precision floating point type. And JSON + //! itself makes no distinction between integer and floating point + //! numeric values, to it they are all just the same primitive type. + //! + //! However, in any real use, it is likely that values which are strictly + //! always integers will be encoded and decoded. This method can be used + //! for safe conversion of a JSON numeric primitive to any other numeric + //! type which the software calling it requires. 
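A hedged sketch of how the new IsNotNULL() and As<T>() accessors combine with the existing implicit conversion to double. It assumes a Json::Data::Handle obtained from the parser (construction is not shown in this hunk) holding keys like "Mean" and "Samples", mirroring the style of use the qa.h hunks further down adopt.

    #include <bit-babbler/json.h>
    #include <bit-babbler/log.h>

    using namespace BitB;

    // d is assumed to hold a parsed object along the lines of
    //   { "Mean": 127.4, "Samples": 1000 }
    static void read_stats( const Json::Data::Handle &d )
    {
        // Test the handle itself, not the JSON 'null' literal
        // (which is what Data::IsNull() reports on).
        if( ! d.IsNotNULL() )
            return;

        // Implicit conversion yields the raw double of a numeric primitive ...
        double mean = d["Mean"];

        // ... while As<T>() narrows it to the integer type the caller wants.
        size_t samples = d["Samples"]->As<size_t>();

        Log<1>( "mean %f over %zu samples\n", mean, samples );
    }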
A compile time error + //! will occur if the @c double type cannot be @c static_cast to type @a T, + //! and a runtime exception will be thrown if this is not a JSON numeric + //! primitive. + //! + //! @tparam T The desired numeric type. + //! + //! @note This method hides the normal @c RefCounted::As dynamic cast + //! operator, since it has the same semantics and the narrowed + //! scope of only applying this operation to numeric primitives + //! is appropriate here. The base class method can still be + //! called explicitly if needed by some specialised case though. + //! + //! @exception Error will be thrown if there is no data or this is not a + //! numeric primitive type + //}}} + template< typename T > + T As() const + { + return static_cast( Number() ); + } + //@} //}}} //! @name Object accessors diff -Nru bit-babbler-0.7/include/bit-babbler/log.h bit-babbler-0.8/include/bit-babbler/log.h --- bit-babbler-0.7/include/bit-babbler/log.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/log.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2003 - 2017, Ron +// Copyright 2003 - 2018, Ron // // You must include bit-babbler/impl/log.h exactly once in some translation unit // of any program using the Log() functions. @@ -11,10 +11,10 @@ #ifdef _REENTRANT #include -#endif -#if HAVE_PTHREAD_SET_NAME_NP + #if HAVE_PTHREAD_SET_NAME_NP #include + #endif #endif #if EM_PLATFORM_POSIX @@ -37,6 +37,7 @@ #define LOG_NOTICE 5 // normal but significant condition void openlog(const char *ident, int option, int facility); + void syslog(int priority, const char *format, ...); void vsyslog(int priority, const char *format, va_list ap); #endif @@ -93,11 +94,108 @@ namespace BitB { + //! @name String length functions + //! Overloaded for generic template friendliness. + //@{ //{{{ + + //! Return the (string) length of a @c char. + inline size_t stringlength( char ) { return 1; } + + //! Return the length of a @c NULL terminated @c char* string. + inline size_t stringlength( const char *s ) { return ( s ) ? strlen( s ) : 0; } + + //! Return the (string) length of a @c wchar_t. + inline size_t stringlength( wchar_t ) { return 1; } + + //! Return the length of a @c NULL terminated @c wchar_t* string. + inline size_t stringlength( const wchar_t *s ) { return ( s ) ? wcslen( s ) : 0; } + + //! Return the length of a string type (with a @c size() method). + template< typename S > + BB_PURE size_t stringlength( const S &s ) { return s.size(); } + + //@} //}}} + + + //! Test if @a s starts with the string @a c + //{{{ + //! @param c The substring to match. It may be a: + //! - @c NULL terminated @c char* or @c wchar_t* string + //! - @c std::basic_string compatible type. + //! @param s The string to match @a c in. Must be a @c std::basic_string + //! compatible type. + //! + //! @return @c true if @a s starts with the string @a c + //}}} + template< typename C, typename S > + bool StartsWith( C c, const S &s ) + { //{{{ + + size_t n = stringlength(c); + + if( s.size() < n ) + return false; + + return s.compare( 0, n, c ) == 0; + + } //}}} + + + //! Returns all characters after the first occurrence of @a c, or empty if @a c is not in @a s. + //{{{ + //! @param c The character or substring to match. It may be a: + //! - single @c char or @c wchar_t type + //! - @c NULL terminated @c char* or @c wchar_t* string + //! - @c std::basic_string compatible type. + //! @param s The string to match @a c in. 
Must be a @c std::basic_string + //! compatible type. + //! + //! @return A string of type @a S, containing all characters after the first + //! occurrence of @a c, or an empty string if @a c is not in @a s. + //}}} + template< typename C, typename S > + S afterfirst( C c, const S &s ) + { //{{{ + + typename S::size_type n = s.find( c ); + return ( n == S::npos || ( n += stringlength( c ), n == s.size() ) ) + ? S() + : s.substr( n ); + + } //}}} + + //! Return all characters before the first occurrence of @a c, or @a s if @a c is not in @a s. + //{{{ + //! @param c The character or substring to match. It may be a: + //! - single @c char or @c wchar_t type + //! - @c NULL terminated @c char* or @c wchar_t* string + //! - @c std::basic_string compatible type. + //! @param s The string to match @a c in. Must be a @c std::basic_string + //! compatible type. + //! + //! @return A string of type @a S, containing all characters before the + //! first occurrence of @a c, or @a s if @a c is not in @a s. + //}}} + template< typename C, typename S > + S beforefirst( C c, const S &s ) + { //{{{ + + typename S::size_type n = s.find( c ); + + return ( n == S::npos ) ? s + : ( n == 0 ) ? S() + : S( s.substr( 0, n ) ); + // cast here to keep the nested ternary operator happy + + } //}}} + + + typedef std::basic_string OctetString; std::string OctetsToHex( const OctetString &octets, size_t wrap = 0, bool short_form = false ); - std::string OctetsToShortHex( const OctetString &octets, size_t wrap = 0 ) + static inline std::string OctetsToShortHex( const OctetString &octets, size_t wrap = 0 ) { return OctetsToHex( octets, wrap, true ); } @@ -133,6 +231,18 @@ return StrToUL( s.c_str(), base ); } + // These two mostly exist as a convenience to hush clang paranoia about + // implicit conversions losing precision for unsigned int conversions. + static inline unsigned StrToU( const char *s, int base = 0 ) + { + return unsigned(StrToUL(s, base)); + } + + static inline unsigned StrToU( const std::string &s, int base = 0 ) + { + return StrToU( s.c_str(), base ); + } + unsigned long StrToScaledUL( const char *s, unsigned scale = 1000 ); @@ -141,6 +251,16 @@ return StrToScaledUL( s.c_str(), scale ); } + static inline unsigned StrToScaledU( const char *s, unsigned scale = 1000 ) + { + return unsigned(StrToScaledUL(s, scale)); + } + + static inline unsigned StrToScaledU( const std::string &s, unsigned scale = 1000 ) + { + return StrToScaledU( s.c_str(), scale ); + } + double StrToScaledD( const char *s ); @@ -154,6 +274,7 @@ BB_PRINTF_FORMAT(1,2) std::string stringprintf( const char *format, ... 
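The new string helpers are easiest to see with a concrete split. Only StartsWith(), afterfirst(), beforefirst() and Log come from these hunks; the address string and its "udp:" prefix are illustrative, not a format these helpers define.

    #include <bit-babbler/log.h>
    #include <string>

    using namespace BitB;

    static void split_address( const std::string &addr )
    {
        // e.g. addr == "udp:localhost:2020"
        if( StartsWith( "udp:", addr ) )
        {
            std::string rest = afterfirst( ':', addr );    // "localhost:2020"
            std::string host = beforefirst( ':', rest );   // "localhost"
            std::string port = afterfirst( ':', rest );    // "2020"

            Log<2>( "host '%s', port '%s'\n", host.c_str(), port.c_str() );
        }
    }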
); + BB_PRINTF_FORMAT(1,0) std::string vstringprintf( const char *format, va_list arglist ); @@ -178,12 +299,13 @@ } //}}} - std::string timeprintf( std::string format, const timeval &tv = GetWallTimeval() ); + BB_STRFTIME_FORMAT(1,0) + std::string timeprintf( const char *format, const timeval &tv = GetWallTimeval() ); #ifdef _REENTRANT - void SetThreadName( const char *name, pthread_t tid = pthread_self() ) + static inline void SetThreadName( const char *name, pthread_t tid = pthread_self() ) { //{{{ #if HAVE_PTHREAD_SETNAME_NP_GNU @@ -211,7 +333,7 @@ } //}}} - void SetThreadName( const std::string &name, pthread_t tid = pthread_self() ) + static inline void SetThreadName( const std::string &name, pthread_t tid = pthread_self() ) { SetThreadName( name.c_str(), tid ); } @@ -226,7 +348,20 @@ extern int opt_verbose; - template< int N > void Logv( const char *format, va_list arglist ) + static inline void SendLogsToSyslog( const char *id, + int option = LOG_PID, + int facility = LOG_DAEMON ) + { //{{{ + + opt_syslog = 1; + openlog( id, option, facility ); + + } //}}} + + + template< int N > + BB_PRINTF_FORMAT(1,0) + void Logv( const char *format, va_list arglist ) { //{{{ if( opt_verbose < N ) @@ -245,12 +380,12 @@ if( opt_timestamp ) { - std::string fmt = timeprintf("%T.%%u") + ": " + format; - + std::string msg = timeprintf("%T.%%u") + ": " + + vstringprintf( format, arglist ); if( opt_syslog ) - vsyslog( LOG_MAKEPRI(LOG_DAEMON, LOG_NOTICE), fmt.c_str(), arglist ); + syslog( LOG_MAKEPRI(LOG_DAEMON, LOG_NOTICE), "%s", msg.c_str() ); else - vfprintf( stderr, fmt.c_str(), arglist ); + fprintf( stderr, "%s", msg.c_str() ); } else { @@ -284,16 +419,17 @@ void LogErr( const char *format, ... ) { //{{{ - va_list arglist; - std::string fmt( format ); + va_list arglist; + va_start( arglist, format ); - if( fmt.size() && fmt[fmt.size() - 1] == '\n' ) - fmt.erase( fmt.size() - 1 ); + std::string msg = vstringprintf( format, arglist ); - fmt.append(": ").append( strerror(errno) ).append(1,'\n'); + if( msg.size() && msg[msg.size() - 1] == '\n' ) + msg.erase( msg.size() - 1 ); - va_start( arglist, format ); - Logv( fmt.c_str(), arglist ); + msg.append(": ").append( strerror(errno) ).append(1,'\n'); + + Log( "%s", msg.c_str() ); va_end( arglist ); } //}}} @@ -303,16 +439,17 @@ void LogErr( int code, const char *format, ... ) { //{{{ - va_list arglist; - std::string fmt( format ); + va_list arglist; + va_start( arglist, format ); - if( fmt.size() && fmt[fmt.size() - 1] == '\n' ) - fmt.erase( fmt.size() - 1 ); + std::string msg = vstringprintf( format, arglist ); - fmt.append(": ").append( strerror(code) ).append(1,'\n'); + if( msg.size() && msg[msg.size() - 1] == '\n' ) + msg.erase( msg.size() - 1 ); - va_start( arglist, format ); - Logv( fmt.c_str(), arglist ); + msg.append(": ").append( strerror(code) ).append(1,'\n'); + + Log( "%s", msg.c_str() ); va_end( arglist ); } //}}} diff -Nru bit-babbler-0.7/include/bit-babbler/math.h bit-babbler-0.8/include/bit-babbler/math.h --- bit-babbler-0.7/include/bit-babbler/math.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/math.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2013 - 2015, Ron +// Copyright 2013 - 2018, Ron #ifndef _BB_MATH_H #define _BB_MATH_H @@ -26,14 +26,26 @@ return v ? 
sizeof(unsigned) * 8 - unsigned(__builtin_clz(v)) : 0; } - static inline unsigned powof2_down( unsigned v ) + static inline unsigned fls( unsigned long v ) { - return 1u << (fls(v) - 1); + return v ? sizeof(unsigned long) * 8 - unsigned(__builtin_clzl(v)) : 0; } - static inline unsigned powof2_up( unsigned v ) + static inline unsigned fls( unsigned long long v ) { - return 1u << fls(v - 1); + return v ? sizeof(unsigned long long) * 8 - unsigned(__builtin_clzll(v)) : 0; + } + + template< typename T > + BB_CONST T powof2_down( T v ) + { + return T(1) << (fls(v) - 1); + } + + template< typename T > + BB_CONST T powof2_up( T v ) + { + return T(1) << fls(v - 1); } } diff -Nru bit-babbler-0.7/include/bit-babbler/qa.h bit-babbler-0.8/include/bit-babbler/qa.h --- bit-babbler-0.7/include/bit-babbler/qa.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/qa.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2014 - 2016, Ron +// Copyright 2014 - 2018, Ron // // With much kudos to John "Random" Walker for the public domain ENT suite // of tests, which the implementation below doesn't actually take any code @@ -308,13 +308,13 @@ {} Fail( const Json::Data::Handle &fail ) - : tested( fail["Tested"] ) - , entropy( fail["Entropy"] ) - , chisq( fail["Chisq"] ) - , mean( fail["Mean"] ) - , pi( fail["Pi"] ) - , corr( fail["Autocorr"] ) - , minentropy( fail["MinEntropy"] ) + : tested( fail["Tested"]->As() ) + , entropy( fail["Entropy"]->As() ) + , chisq( fail["Chisq"]->As() ) + , mean( fail["Mean"]->As() ) + , pi( fail["Pi"]->As() ) + , corr( fail["Autocorr"]->As() ) + , minentropy( fail["MinEntropy"]->As() ) {} @@ -415,7 +415,7 @@ s += stringprintf( "%*u: %s %.*x -> %-12zu %+10.2f %8.2f %.9f\n", sizeof(T) > 1 ? 
5 : 3, b.rank, - AsBinary(b.symbol).c_str(), + AsBinary(T(b.symbol)).c_str(), int(sizeof(T) * 2), b.symbol, b.freq, error, errorsq, double(b.freq) / nsamples ); } @@ -470,9 +470,9 @@ } Data( const Json::Data::Handle &data ) - : samples( data["Samples"] ) - , inradius( data["PiIn"] ) - , pisamples( data["PiSamples"] ) + : samples( data["Samples"]->As() ) + , inradius( data["PiIn"]->As() ) + , pisamples( data["PiSamples"]->As() ) { //{{{ Json::Data::Handle binarray = data["Bins"]; @@ -482,7 +482,7 @@ NBITS, binarray->GetArraySize() ); for( size_t i = 0; i < NBINS; ++i ) - bin[i] = binarray[i]; + bin[i] = binarray[i]->As(); for( size_t i = 0; i < DATASET_MAX; ++i ) result[i] = Result( data[ DataSetName(DataSet(i)) ] ); @@ -499,7 +499,7 @@ { //{{{ clear(); - samples = data["Samples"]; + samples = data["Samples"]->As(); for( size_t i = 0; i < DATASET_MAX; ++i ) result[i] = Result( data[ DataSetName(DataSet(i)) ] ); @@ -558,9 +558,9 @@ double fudge = sqrt(new_expected * chisq); if( error < 0 ) - bin[i] = lrint(new_expected - fudge); + bin[i] = size_t(lrint(new_expected - fudge)); else - bin[i] = lrint(new_expected + fudge); + bin[i] = size_t(lrint(new_expected + fudge)); samples += bin[i]; } @@ -746,14 +746,14 @@ typename Bin::Vector bins; for( size_t i = 0; i < NBINS; ++i ) - bins.push_back( Bin( i, bin[i] ) ); + bins.push_back( Bin( unsigned(i), bin[i] ) ); typename Bin::Vector sorted_bins( bins ); std::stable_sort( sorted_bins.begin(), sorted_bins.end(), Bin::ByFrequency ); for( size_t i = 0; i < NBINS; ++i ) - bins[ sorted_bins[i].symbol ].rank = i + 1; + bins[ sorted_bins[i].symbol ].rank = unsigned(i + 1); return Bin::PrettyPrint( bins, samples, first_n, last_n ); @@ -766,12 +766,12 @@ typename Bin::Vector bins; for( size_t i = 0; i < NBINS; ++i ) - bins.push_back( Bin( i, bin[i] ) ); + bins.push_back( Bin( unsigned(i), bin[i] ) ); std::stable_sort( bins.begin(), bins.end(), Bin::ByFrequency ); for( size_t i = 0; i < NBINS; ++i ) - bins[i].rank = i + 1; + bins[i].rank = unsigned(i + 1); return Bin::PrettyPrint( bins, samples, first_n, last_n ); @@ -906,7 +906,7 @@ : m_short_len( short_len ? short_len : NBITS == 8 ? 
500000 : 100000000 ) - , m_radius( floor( pow( pow(256.0, MONTE_BYTES / 2) - 1.0, 2.0 ) ) ) + , m_radius( uint64_t(floor( pow( pow(256.0, MONTE_BYTES / 2) - 1.0, 2.0 ) )) ) , m_have_results( false ) , m_have_unchecked_results( false ) , m_entropy_converged( 0 ) @@ -1236,6 +1236,9 @@ }; //}}} + template< typename T> + typename Ent::Results_Only_ Ent::Results_Only; + template <> inline const Ent::Limits &Ent::GetLimits() const { //{{{ @@ -1420,7 +1423,7 @@ { m_chisq += m_err[0][i] * m_err[0][i] / m_expected[i] + m_err[1][i] * m_err[1][i] / m_expected[i]; - m_chisqk = i; + m_chisqk = unsigned(i); } div *= 2.0; @@ -1437,7 +1440,7 @@ m_chisq += m_err[0][i] * m_err[0][i] / m_expected[i] + m_err[1][i] * m_err[1][i] / m_expected[i]; - m_chisqk = i; + m_chisqk = unsigned(i); } else break; @@ -1490,8 +1493,8 @@ Result( const Json::Data::Handle &result ) : m_chisq( result["Chisq"] ) , m_chisqp( result["Chisq-p"] ) - , m_chisqk( result["Chisq-k"] ) - , maxrun( result["Max"] ) + , m_chisqk( result["Chisq-k"]->As() ) + , maxrun( result["Max"]->As() ) { //{{{ Json::Data::Handle runs = result["Runs"]; @@ -1505,13 +1508,13 @@ memset( m_err, 0, sizeof(m_err) ); memset( runlengths, 0, sizeof(runlengths) ); - total[0] = result["Zeros"]; - total[1] = result["Ones"]; + total[0] = result["Zeros"]->As(); + total[1] = result["Ones"]->As(); for( size_t i = 0; i < nruns; ++i ) { - runlengths[0][i] = runs[i][0]; - runlengths[1][i] = runs[i][1]; + runlengths[0][i] = runs[i][0]->As(); + runlengths[1][i] = runs[i][1]->As(); m_expected[i] = runs[i][2]; m_err[0][i] = runlengths[0][i] - m_expected[i]; m_err[1][i] = runlengths[1][i] - m_expected[i]; @@ -1542,7 +1545,7 @@ // probability of being within about +/- 3 of this value. size_t GetExpectedMax() const { - return lrint( log2( (total[0] + total[1]) / 2.0 ) ); + return size_t(lrint( log2( (total[0] + total[1]) / 2.0 ) )); } @@ -1771,11 +1774,11 @@ PassRuns( const Json::Data::Handle &pass ) : m_count( 0 ) - , m_runs( pass["Runs"] ) - , m_previous( pass["Previous"] ) - , m_avg( size_t(pass["Short"]) << AVG_Q ) - , m_long_avg( pass["Long"] * m_runs ) - , m_peak( pass["Peak"] ) + , m_runs( pass["Runs"]->As() ) + , m_previous( pass["Previous"]->As() ) + , m_avg( pass["Short"]->As() << AVG_Q ) + , m_long_avg( pass["Long"]->As() * m_runs ) + , m_peak( pass["Peak"]->As() ) {} @@ -1899,10 +1902,10 @@ {} FailRate( const Json::Data::Handle &stats ) - : pass( stats["Passed"] ) - , fail( stats["Failed"] ) - , peak( stats["Peak"] * (1u << AVG_Q) ) - , rate( stats["Short"] * (1u << AVG_Q) ) + : pass( stats["Passed"]->As() ) + , fail( stats["Failed"]->As() ) + , peak( size_t(stats["Peak"] * (1u << AVG_Q)) ) + , rate( size_t(stats["Short"] * (1u << AVG_Q)) ) {} @@ -2115,8 +2118,8 @@ unsigned Check( const uint8_t *buf ) { //{{{ - const unsigned pokermin = ( 2.16 + 5000) * 5000 / 16; // 1563175 - const unsigned pokermax = (46.17 + 5000) * 5000 / 16; // 1576928.125 + const unsigned pokermin = unsigned(( 2.16 + 5000.) * 5000. / 16.); // 1563175 + const unsigned pokermax = unsigned((46.17 + 5000.) * 5000. / 16.); // 1576928.125 unsigned result = 0; unsigned ones_count = 0; diff -Nru bit-babbler-0.7/include/bit-babbler/secret-sink.h bit-babbler-0.8/include/bit-babbler/secret-sink.h --- bit-babbler-0.7/include/bit-babbler/secret-sink.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/secret-sink.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. 
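A small illustration of the templated powof2_down()/powof2_up() and the new fls() overloads from the math.h hunk above. The values are arbitrary, and the BitB namespace and header path are assumed from the surrounding headers.

    #include <bit-babbler/math.h>
    #include <cstdio>

    using namespace BitB;

    int main()
    {
        unsigned      rate  = 2500000;
        unsigned long bytes = 100000;

        // fls() returns the 1-based index of the highest set bit (0 for 0),
        // now with overloads selected by the argument type.
        printf( "fls(%u) = %u\n", rate, fls( rate ) );        // 22

        // The templates round to the power of two at or below, or at or
        // above, the argument, in the argument's own type.
        printf( "down: %u\n",  powof2_down( rate ) );         // 2097152
        printf( "up:   %lu\n", powof2_up( bytes ) );          // 131072

        return 0;
    }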
-// Copyright 2014 - 2016, Ron +// Copyright 2014 - 2018, Ron #ifndef _BB_SECRET_SINK_H #define _BB_SECRET_SINK_H @@ -30,6 +30,16 @@ size_t block_size; size_t bytes; + Options() + : block_delay( 0 ) + , block_size( 65536 ) + , bytes( 0 ) + {} + + // We don't need these anymore with the new seedd config parser. + // But hang on to them a bit longer in case something else does. + #if 0 + Options( const std::string &path, size_t bd = 0, size_t bs = 65536, size_t n = 0 ) : devpath( path ) , block_delay( bd ) @@ -75,6 +85,8 @@ } //}}} + #endif + }; //}}} @@ -89,6 +101,8 @@ void do_read_thread() { //{{{ + SetThreadName( stringprintf("QA %s", m_options.devpath.c_str()).c_str() ); + Log<3>( "SecretSink( %s ): begin read_thread\n", m_options.devpath.c_str() ); uint8_t buf[ m_options.block_size ]; @@ -99,7 +113,7 @@ { while( n < m_options.block_size ) { - int r = read( m_fd, buf + n, m_options.block_size - n ); + ssize_t r = read( m_fd, buf + n, m_options.block_size - n ); if( r < 0 ) throw SystemError( _("SecretSink( %s )::read( %zu ) failed"), @@ -108,7 +122,7 @@ if( r == 0 ) throw Error( _("SecretSink( %s )::read EOF"), m_options.devpath.c_str() ); - n += r; + n += size_t(r); } m_qa.Check( buf, n ); @@ -124,7 +138,7 @@ } if( m_options.block_delay ) - usleep( m_options.block_delay * 1000 ); + usleep( useconds_t(m_options.block_delay * 1000) ); } } //}}} diff -Nru bit-babbler-0.7/include/bit-babbler/secret-source.h bit-babbler-0.8/include/bit-babbler/secret-source.h --- bit-babbler-0.7/include/bit-babbler/secret-source.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/secret-source.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2010 - 2017, Ron +// Copyright 2010 - 2018, Ron #ifndef _BB_SECRET_SOURCE_H #define _BB_SECRET_SOURCE_H @@ -183,7 +183,7 @@ if( n != 0 ) { try { - sleep_init = StrToScaledUL( arg.substr(0, n) ); + sleep_init = StrToScaledU( arg.substr(0, n) ); } catch( const std::exception &e ) { @@ -195,7 +195,7 @@ if( n + 1 < arg.size() ) { try { - sleep_max = StrToScaledUL( arg.substr(n + 1) ); + sleep_max = StrToScaledU( arg.substr(n + 1) ); } catch( const std::exception &e ) { @@ -278,11 +278,11 @@ { //{{{ if( options.bitrate == m_bitrate ) - LogMsg<2>( "+ BitBabbler( bitrate %u, fold %u, mask 0x%02x )", - m_bitrate, m_fold, m_enable_mask ); + LogMsg<2>( "+ BitBabbler( bitrate %u, fold %u, mask 0x%02x [%02x] )", + m_bitrate, m_fold, options.enable_mask, m_enable_mask ); else - LogMsg<2>( "+ BitBabbler( bitrate %u (%u), fold %u, mask 0x%02x )", - options.bitrate, m_bitrate, m_fold, m_enable_mask ); + LogMsg<2>( "+ BitBabbler( bitrate %u (%u), fold %u, mask 0x%02x [%02x] )", + options.bitrate, m_bitrate, m_fold, options.enable_mask, m_enable_mask ); unsigned maxpacket = GetMaxPacketSize(); @@ -291,7 +291,7 @@ // maximum packet size and 64kB that will take less than 250ms to // transfer (which is then the maximum time we'll block waiting to // perform an orderly exit). - unsigned chunksize = std::max( maxpacket, + size_t chunksize = std::max( maxpacket, std::min( options.chunksize ? 
options.chunksize : 65536u, powof2_down(m_bitrate / 32 @@ -339,7 +339,7 @@ chunksize = SetChunkSize( chunksize ); SetLatency( latency ); - LogMsg<3>( "Chunk size %u, %u ms/per chunk (latency %u ms, max packet %u)", + LogMsg<3>( "Chunk size %zu, %zu ms/per chunk (latency %u ms, max packet %u)", chunksize, chunksize * 8000 / m_bitrate, latency, maxpacket ); if( claim_now ) @@ -577,7 +577,7 @@ if( e == arg || *e != ':' || v > unsigned(-1) ) throw Error( _("Invalid --group-size option '%s'"), arg ); - groupid = v; + groupid = Group::ID(v); size = StrToScaledUL( e + 1, 1024 ); } @@ -771,6 +771,14 @@ buf = new uint8_t[size]; + // Bump the refcount until the thread is started, otherwise we + // may lose a race with this Source being released by the caller + // before the thread can take its handle from the raw pointer. + // Think of it as a virtual Handle passed with pthread_create. + Ref(); + + // We don't need to Unref() if this fails, because we'll throw + // and it will never have been constructed to be destroyed ... int ret = pthread_create( &thread, GetDefaultThreadAttr(), Pool::source_thread, this ); if( ret ) @@ -1048,6 +1056,13 @@ } catch( const USBError &e ) { + // Don't warn about enum values not being explicitly handled here, + // we don't want to have to chase every new error code added to + // libusb that we don't explicitly care about handling here. + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpragmas" + #pragma GCC diagnostic ignored "-Wswitch-enum" + switch( e.GetErrorCode() ) { case LIBUSB_ERROR_PIPE: @@ -1067,6 +1082,7 @@ default: throw; } + #pragma GCC diagnostic pop } } //}}} @@ -1076,6 +1092,9 @@ Source::Handle s = static_cast( p ); + // Drop the 'virtual handle' from the ctor, we have a real one now. + s->Unref(); + try { s->pool->do_source_thread( s ); } @@ -1360,7 +1379,7 @@ if( ret ) LogErr<0>( ret, - "Pool::RemoveSource: failed to join thread for removed device %s\n", + "Pool::RemoveSource: failed to join thread for removed device %s", d->VerboseStr().c_str() ); else Log<4>( "Pool::RemoveSource: joined thread for %s\n", @@ -1445,7 +1464,7 @@ if( w == 0 ) throw Error( _("Pool::WriteToFD( %d ) EOF"), fd ); - c -= w; + c -= size_t(w); } if( len && (len -= n) == 0 ) @@ -1463,7 +1482,6 @@ pthread_t p; int ret = pthread_create( &p, GetDefaultThreadAttr(), writefd_thread, w ); - if( ret ) { delete w; @@ -1522,8 +1540,8 @@ const unsigned N = QA::FIPS::BUFFER_SIZE; // 20kbits for FIPS test. const unsigned folds = 2; - const int timeout = m_opt.kernel_refill_time - ? m_opt.kernel_refill_time * 1000 : -1; + const int timeout = int(m_opt.kernel_refill_time) + ? int(m_opt.kernel_refill_time * 1000) : -1; union { uint8_t b[N + sizeof(struct rand_pool_info)]; @@ -1567,8 +1585,8 @@ } while( ! source_ok || ! folded_ok ); - rpi.entropy_count = n * 8; - rpi.buf_size = n; + rpi.entropy_count = int(n * 8); + rpi.buf_size = int(n); #if EM_PLATFORM_LINUX @@ -1615,7 +1633,6 @@ pthread_t p; int ret = pthread_create( &p, GetDefaultThreadAttr(), feedkernel_thread, this ); - if( ret ) throw SystemError( ret, _("Pool::FeedKernelEntropyAsync: " "failed to create thread") ); @@ -1938,8 +1955,8 @@ { //{{{ return new_device( vendorid, productid, - StrToUL( busnum ? busnum : "", 10 ), - StrToUL( devnum ? devnum : "", 10 ), + StrToU( busnum ? busnum : "", 10 ), + StrToU( devnum ? devnum : "", 10 ), mfg ? mfg : "", product ? product : "", serial ? 
serial : "", diff -Nru bit-babbler-0.7/include/bit-babbler/socket.h bit-babbler-0.8/include/bit-babbler/socket.h --- bit-babbler-0.7/include/bit-babbler/socket.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/socket.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 1998 - 2016, Ron +// Copyright 1998 - 2018, Ron #ifndef _BB_SOCKET_H #define _BB_SOCKET_H @@ -12,6 +12,7 @@ #include #include #include + #include #elif EM_PLATFORM_MSW @@ -112,8 +113,9 @@ if( addrinf->ai_addrlen > sizeof(sockaddr_storage) ) { freeaddrinfo( addrinf ); - throw Error( _("SockAddr( '%s' ): ai_addrlen %u > sockaddr_storage %zu"), - AddrStr().c_str(), addrinf->ai_addrlen, sizeof(sockaddr_storage) ); + throw Error( _("SockAddr( '%s' ): ai_addrlen %ju > sockaddr_storage %zu"), + AddrStr().c_str(), uintmax_t(addrinf->ai_addrlen), + sizeof(sockaddr_storage) ); } addr_type = addrinf->ai_socktype; @@ -169,28 +171,24 @@ SocketError( const char *format, ... ) throw() : m_errno( WSAGetLastError() ) { - va_list arglist; - std::string msg( format ); - - msg.append( ": " ).append( GetSysMsg() ); - + va_list arglist; va_start( arglist, format ); - SetMessage( msg.c_str(), arglist ); + SetMessage( format, arglist ); va_end( arglist ); + + AppendMessage( std::string(": ") + GetSysMsg() ); } BB_PRINTF_FORMAT(3,4) SocketError( int code, const char *format, ... ) throw() : m_errno( code ) { - va_list arglist; - std::string msg( format ); - - msg.append( ": " ).append( GetSysMsg() ); - + va_list arglist; va_start( arglist, format ); - SetMessage( msg.c_str(), arglist ); + SetMessage( format, arglist ); va_end( arglist ); + + AppendMessage( std::string(": ") + GetSysMsg() ); } @@ -204,21 +202,24 @@ void LogSocketErr( const char *format, ... ) { //{{{ - va_list arglist; - std::string fmt( format ); - char errmsg[65536]; + char errmsg[65536]; + int errnum = WSAGetLastError(); - if( fmt.size() && fmt[fmt.size() - 1] == '\n' ) - fmt.erase( fmt.size() - 1 ); + va_list arglist; + va_start( arglist, format ); + + std::string msg = vstringprintf( format, arglist ); + + if( msg.size() && msg[msg.size() - 1] == '\n' ) + msg.erase( msg.size() - 1 ); errmsg[0] = '\0'; FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - 0, WSAGetLastError(), 0, errmsg, sizeof(errmsg), NULL ); + 0, errnum, 0, errmsg, sizeof(errmsg), NULL ); - fmt.append(": ").append( errmsg ).append(1,'\n'); + msg.append(": ").append( errmsg ).append(1,'\n'); - va_start( arglist, format ); - Logv( fmt.c_str(), arglist ); + Log( "%s", msg.c_str() ); va_end( arglist ); } //}}} @@ -237,7 +238,7 @@ int ret = WSAStartup(MAKEWORD(2,2), &m_wsa); if( ret ) - throw Error( "WSAStartup failed with error %d" ); + throw Error( "WSAStartup failed with error %d", ret ); } ~WinsockScope() @@ -256,6 +257,132 @@ #endif + + static inline void EnableFreebind( int fd, const std::string &where = std::string() ) + { //{{{ + + #ifdef IP_FREEBIND + + int i = 1; + if( setsockopt( fd, IPPROTO_IP, IP_FREEBIND, &i, sizeof(i) ) == -1 ) + throw SocketError( _("%s: Failed to set IP_FREEBIND"), where.c_str() ); + + #elif defined(IP_BINDANY) + + // FreeBSD variant, requires PRIV_NETINET_BINDANY privilege to enable. + int i = 1; + if( setsockopt( fd, IPPROTO_IP, IP_BINDANY, &i, sizeof(i) ) == -1 ) + throw SocketError( _("%s: Failed to set IP_FREEBIND (IP_BINDANY)"), + where.c_str() ); + #elif defined(SO_BINDANY) + + // OpenBSD variant, requires superuser privilege to enable. 
+ int i = 1; + if( setsockopt( fd, SOL_SOCKET, SO_BINDANY, &i, sizeof(i) ) == -1 ) + throw SocketError( _("%s: Failed to set IP_FREEBIND (SO_BINDANY)"), + where.c_str() ); + #else + + (void)fd; + Log<0>( _("%s: IP_FREEBIND is not supported on this platform\n"), where.c_str() ); + + #endif + + } //}}} + + + // Check if systemd is expecting us to acknowledge it. + //{{{ + // The actually documented guarantees here leave something to be desired, + // but if NOTIFY_SOCKET is set in the environment, and contains either an + // absolute path, or a string begining with an '@', then it is probably + // systemd indicating that it wants notification sent to either a named + // unix domain socket or an abstract socket, respectively. The actual + // address of the abstract socket is obtained by replacing the @ with a + // null character. Though that seems academic, because in practice it + // appears to always use a named socket. If something else sets this, + // then the caller who did that gets to keep all the pieces ... + // + // If this returns a non-empty string, the above conditions have been met. + //}}} + static inline std::string GetSystemdNotifySocket() + { //{{{ + + char *s = getenv("NOTIFY_SOCKET"); + + if( ! s || (s[0] != '@' && s[0] != '/') || s[1] == '\0' ) + return std::string(); + + return s; + + } //}}} + + // Send a notification message to systemd. + //{{{ + // This will do nothing if the NOTIFY_SOCKET was not set, otherwise it will + // try to send the given message to the indicated address and throw if we + // aren't able to do that. Since systemd doesn't actually acknowledge our + // acknowledgement, there's no way to know if this actually did anything + // aside from squirting a datagram out into the void. If it really was + // systemd expecting something from us, then it will terminate this process + // if it doesn't get a READY message before its timeout expires. It will + // also reject the message if the sender's SCM_CREDENTIALS are not included + // in the packet sent, but sendto(2) will include those for us, without + // needing to bloat the code here with some useless Trying To Look Clever, + // and then needing to guard most of that to try and keep it all portable. + //}}} + static inline void SystemdNotify( const std::string &msg, + const std::string &ns = GetSystemdNotifySocket() ) + { //{{{ + + #if EM_PLATFORM_POSIX + + if( ns.empty() ) + return; + + sockaddr_any_t addr; + socklen_t addrlen = socklen_t(offsetof(sockaddr_un, sun_path) + ns.size()); + + if( ns.size() >= sizeof(addr.un.sun_path) ) + throw Error( _("SystemdNotify: socket path '%s' is too long. " + "Maximum length is %zu bytes."), + ns.c_str(), sizeof(addr.un.sun_path) - 1 ); + + addr.un.sun_family = AF_UNIX; + ns.copy( addr.un.sun_path, sizeof(addr.un.sun_path) - 1 ); + addr.un.sun_path[ ns.size() ] = '\0'; + + // Systemd passes abstract socket addresses with an initial '@', + // but Linux identifies them by using a null as the first byte. 
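
The addressing rule described in the comment just above (systemd hands over abstract socket names with a leading '@', which has to become the NUL byte that Linux actually uses) is easy to gloss over, so here it is boiled down to a standalone sketch. This is a simplified restatement of the helper being added here, not a replacement for it: the length checks, error reporting and MSG_NOSIGNAL of the real code are left out, and "READY=1" is simply the payload a Type=notify unit waits for before it considers the service started.

    // Condensed, POSIX-only restatement of the notification logic above:
    // read NOTIFY_SOCKET, map a leading '@' to the NUL byte that marks a
    // Linux abstract socket address, and fire a "READY=1" datagram at it.
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    static void notify_ready()
    {
        const char *ns = std::getenv( "NOTIFY_SOCKET" );

        // Same sanity test as GetSystemdNotifySocket() above.
        if( ! ns || (ns[0] != '@' && ns[0] != '/') || ns[1] == '\0' )
            return;

        sockaddr_un sa;
        std::memset( &sa, 0, sizeof(sa) );
        sa.sun_family = AF_UNIX;
        std::strncpy( sa.sun_path, ns, sizeof(sa.sun_path) - 1 );

        if( sa.sun_path[0] == '@' )
            sa.sun_path[0] = '\0';          // abstract namespace address

        socklen_t len = socklen_t( offsetof(sockaddr_un, sun_path)
                                   + std::strlen(ns) );

        int fd = socket( AF_UNIX, SOCK_DGRAM, 0 );
        if( fd == -1 )
            return;

        const char msg[] = "READY=1";

        sendto( fd, msg, sizeof(msg) - 1, 0,
                reinterpret_cast<const sockaddr*>( &sa ), len );
        close( fd );
    }

    int main()
    {
        // e.g. called once the daemon has finished its own initialisation.
        notify_ready();
        return 0;
    }
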
+ if( ns[0] == '@' ) + addr.un.sun_path[0] = '\0'; + + + int fd = socket( AF_UNIX, SOCK_DGRAM, 0 ); + + if( fd == -1 ) + throw SocketError( _("SystemdNotify( %s, %s ): failed to create socket"), + msg.c_str(), ns.c_str() ); + + ssize_t n = sendto( fd, msg.c_str(), msg.size(), MSG_NOSIGNAL, &addr.any, addrlen ); + + close(fd); + + if( n < 0 ) + throw SocketError( _("SystemdNotify( %s, %s ): failed to send message"), + msg.c_str(), ns.c_str() ); + if( size_t(n) < msg.size() ) + throw Error( _("SystemdNotify( %s, %s ): failed to send entire message" + " (only %zd/%zu bytes)"), + msg.c_str(), ns.c_str(), n, msg.size() ); + #else + + (void)msg; (void)ns; + + #endif + + } //}}} + } // BitB namespace #endif // _BB_SOCKET_H diff -Nru bit-babbler-0.7/include/bit-babbler/socket-source.h bit-babbler-0.8/include/bit-babbler/socket-source.h --- bit-babbler-0.7/include/bit-babbler/socket-source.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/socket-source.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2015 - 2016, Ron +// Copyright 2015 - 2018, Ron #ifndef _BB_SOCKET_SOURCE_H #define _BB_SOCKET_SOURCE_H @@ -28,6 +28,8 @@ void do_server_thread() { //{{{ + SetThreadName( "UDP out" ); + std::string addr = m_sa.AddrStr(); Log<3>( "SocketSource( %s ): begin server_thread\n", addr.c_str() ); @@ -76,13 +78,13 @@ #endif if( n == -1 ) - LogSocketErr<1>( _("SocketSource( %s ): sendto failed\n"), addr.c_str() ); + LogSocketErr<1>( _("SocketSource( %s ): sendto failed"), addr.c_str() ); else if( size_t(n) != r ) Log<2>( _("SocketSource( %s ): only %zd of %zu bytes sent\n"), addr.c_str(), n, r ); } else if( n == -1 ) - LogSocketErr<1>( _("SocketSource( %s ): recvfrom failed\n"), addr.c_str() ); + LogSocketErr<1>( _("SocketSource( %s ): recvfrom failed"), addr.c_str() ); else Log<2>( _("SocketSource( %s ): ignoring %zd byte message\n"), addr.c_str(), n ); @@ -128,7 +130,7 @@ typedef RefPtr< SocketSource > Handle; - SocketSource( const Pool::Handle &pool, const std::string &addr ) + SocketSource( const Pool::Handle &pool, const std::string &addr, bool freebind = false ) : m_pool( pool ) , m_sa( addr ) { //{{{ @@ -147,19 +149,23 @@ if( m_fd == -1 ) throw SocketError( _("SocketSource( %s ): failed to open socket"), addr.c_str() ); + try { + if( freebind ) + EnableFreebind( m_fd, stringprintf("SocketSource( %s )", addr.c_str()) ); - if( bind( m_fd, &m_sa.addr.any, m_sa.addr_len ) == -1 ) - { - Close(); - throw SocketError( _("SocketSource( %s ): bind failed"), addr.c_str() ); - } + if( bind( m_fd, &m_sa.addr.any, m_sa.addr_len ) == -1 ) + throw SocketError( _("SocketSource( %s ): bind failed"), addr.c_str() ); - int ret = pthread_create( &m_serverthread, GetDefaultThreadAttr(), server_thread, this ); - if( ret ) + int ret = pthread_create( &m_serverthread, GetDefaultThreadAttr(), server_thread, + this ); + if( ret ) + throw SystemError( ret, _("SocketSource( %s ): failed to create server thread"), + addr.c_str() ); + } + catch( ... 
) { Close(); - throw SystemError( ret, _("SocketSource( %s ): failed to create server thread"), - addr.c_str() ); + throw; } } //}}} diff -Nru bit-babbler-0.7/include/bit-babbler/usbcontext.h bit-babbler-0.8/include/bit-babbler/usbcontext.h --- bit-babbler-0.7/include/bit-babbler/usbcontext.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/usbcontext.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2010 - 2017, Ron +// Copyright 2010 - 2018, Ron #ifndef _BB_USBCONTEXT_H #define _BB_USBCONTEXT_H @@ -119,7 +119,7 @@ if( n == 0 ) vid = 0; - else if( (vid = StrToUL( id.substr(0, n), 16 )) > 0xFFFF ) + else if( (vid = StrToU( id.substr(0, n), 16 )) > 0xFFFF ) throw 1; } catch( const abi::__forced_unwind& ) { @@ -132,7 +132,7 @@ if( n + 1 == id.size() ) pid = 0; - else if( (pid = StrToUL( id.substr(n + 1), 16 )) > 0xFFFF ) + else if( (pid = StrToU( id.substr(n + 1), 16 )) > 0xFFFF ) throw 1; } catch( const abi::__forced_unwind& ) { @@ -233,7 +233,7 @@ if( n != std::string::npos ) { try { - if( (busnum = StrToUL( id.substr(0, n), 10 )) > 127 ) + if( (busnum = StrToU( id.substr(0, n), 10 )) > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { @@ -257,7 +257,7 @@ if( n != std::string::npos ) { try { - if( (busnum = StrToUL( id.substr(0, n), 10 )) > 127 ) + if( (busnum = StrToU( id.substr(0, n), 10 )) > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { @@ -270,7 +270,7 @@ throw Error( _("Device::ID: invalid device address '%s'"), id.c_str() ); try { - devnum = StrToUL( id.substr(n + 1), 10 ); + devnum = StrToU( id.substr(n + 1), 10 ); if( devnum < 1 || devnum > 127 ) throw 1; @@ -289,7 +289,7 @@ if( id.size() < 4 ) { try { - devnum = StrToUL( id, 10 ); + devnum = StrToU( id, 10 ); if( devnum < 1 || devnum > 127 ) throw 1; @@ -633,7 +633,7 @@ { libusb_config_descriptor *c; - int ret = libusb_get_config_descriptor( dev, i, &c ); + int ret = libusb_get_config_descriptor( dev, uint8_t(i), &c ); if( ret ) throw USBError( ret, _("USBContext::Device::Config: " "failed to get configuration %zu descriptor"), i ); @@ -889,7 +889,7 @@ if( __builtin_expect( config < 1 || config > 255, 0 ) ) throw Error( _("Device( %s ): invalid current config (1 < %d < 256)"), m_device->IDStr().c_str(), config ); - return config; + return uint8_t(config); } //}}} @@ -1000,7 +1000,7 @@ return; } - int ret = libusb_clear_halt( m_handle, endpoint_address ); + int ret = libusb_clear_halt( m_handle, uint8_t(endpoint_address) ); if( ret ) throw USBError( ret, _("Device( %s ): ClearHalt failed for endpoint %02x"), m_device->IDStr().c_str(), endpoint_address ); @@ -1333,7 +1333,7 @@ const std::string &GetDevicePort() const { return m_devport; } const std::string &GetDevpath() const { return m_devpath; } - unsigned GetNumConfigurations() const { return m_configs.size(); } + unsigned GetNumConfigurations() const { return unsigned(m_configs.size()); } const Config::Vector &GetConfigurations() const { return m_configs; } const Config &GetConfiguration( size_t n ) const @@ -1484,10 +1484,10 @@ ScopedCancelState cancelstate; Device::Handle h; libusb_device **devs; - int ret = libusb_get_device_list( m_usb, &devs ); + ssize_t ret = libusb_get_device_list( m_usb, &devs ); if( ret < 0 ) - throw USBError( ret, _("USBContext: failed to enumerate devices") ); + throw USBError( int(ret), _("USBContext: failed to enumerate devices") ); for( libusb_device **dev = devs; *dev; ++dev ) { @@ -1680,10 +1680,10 @@ ScopedCancelState 
cancelstate; libusb_device **devs; - int ret = libusb_get_device_list( m_usb, &devs ); + ssize_t ret = libusb_get_device_list( m_usb, &devs ); if( ret < 0 ) - throw USBError( ret, _("USBContext: failed to enumerate devices") ); + throw USBError( int(ret), _("USBContext: failed to enumerate devices") ); if( ! append ) m_devices.clear(); @@ -1693,10 +1693,10 @@ libusb_device *d = *dev; libusb_device_descriptor desc; - ret = libusb_get_device_descriptor(d, &desc); - if( ret < 0 ) + int r = libusb_get_device_descriptor(d, &desc); + if( r < 0 ) { - LogUSBError<1>( ret, _("USBContext::EnumerateDevices: failed to get descriptor") ); + LogUSBError<1>( r, _("USBContext::EnumerateDevices: failed to get descriptor") ); continue; } @@ -1728,7 +1728,7 @@ { //{{{ ScopedMutex lock( &m_device_mutex ); - return m_devices.size(); + return unsigned(m_devices.size()); } //}}} diff -Nru bit-babbler-0.7/include/bit-babbler/users.h bit-babbler-0.8/include/bit-babbler/users.h --- bit-babbler-0.7/include/bit-babbler/users.h 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/include/bit-babbler/users.h 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2004 - 2015, Ron +// Copyright 2004 - 2018, Ron #ifndef _BB_USERS_H #define _BB_USERS_H @@ -39,7 +39,7 @@ buf = new char[bufsize]; - switch( getgrnam_r( group.c_str(), &grent, buf, bufsize, &have_result ) ) + switch( getgrnam_r( group.c_str(), &grent, buf, size_t(bufsize), &have_result ) ) { case 0: case ENOENT: diff -Nru bit-babbler-0.7/libvirt/bbvirt bit-babbler-0.8/libvirt/bbvirt --- bit-babbler-0.7/libvirt/bbvirt 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/libvirt/bbvirt 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,6 @@ #!/bin/bash # This file is distributed as part of the bit-babbler package. -# Copyright 2015 - 2016, Ron +# Copyright 2015 - 2018, Ron # Default configuration if not explicitly specified. config_dir="/etc/bit-babbler" @@ -20,7 +20,7 @@ verb() { - n=$1 + local n=$1 shift (( verbose < n )) || echo "$*" @@ -138,6 +138,83 @@ fi } +# Test if a string is valid to use in a constructed variable name. +# We need to explicitly check this to avoid having "undefined" but wrong things +# happen if we dereference an invalid indirect variable name. A "name" in bash +# is defined as: +# +# 'A word consisting only of alphanumeric characters and underscores, and +# beginning with an alphabetic character or an under‐score.' +# +# With an implicit assumption that all those characters are also only ASCII. +# We don't need to validate that the first character isn't a digit here, because +# we know we will always be appending this to a valid prefix string before use. +# We do want to validate that it's not an empty string though. +is_valid_as_variable_name() +{ + # If we could be sure this would only run with bash 4.3 or later, then + # we could use 'shopt -s globasciiranges' and drop the [:ascii:] test, + # but Wheezy still has bash 4.2 - alternatively we could force use of the + # C locale here to avoid having non-ascii characters collated into the + # range a-z, but not being locale agnostic is ugly, so just test against + # the :ascii: character class explicitly. + [[ -n $1 && $1 != *[^a-zA-Z0-9_]* && $1 != *[^[:ascii:]]* ]] +} + +# Build indices mapping config domain IDs (which must include only characters +# which are valid in variable names) to/from libvirt domain names (which don't +# restrict the allowed character set anymore). 
Given either ID as input, this +# lets us determine both the config ID and libvirt domain name when needed. +# They'll only differ when the DOMAIN_NAME_* override is used to explicitly +# specify the libvirt domain name. +map_domain_names() +{ + import_domain_config + + declare -gA libvirt_domains + declare -gA config_domains + local n k + + verb 4 "Mapping domain identifiers:" + + # First assume every config ID corresponds to a libvirt guest domain name. + for n in "${!DOMAIN_RNG_@}"; do + n=${n#DOMAIN_RNG_} + libvirt_domains[$n]=$n + config_domains[$n]=$n + done + + # It's not very likely that someone might have a DOMAIN_URI defined without + # a corresponding DOMAIN_RNG, so this is normally redundant, but in theory + # it is possible for someone to want to manually add a device to a domain + # which currently has its DOMAIN_RNG commented out, but still want to use + # the URI from the config instead of specifying that manually too. + for n in "${!DOMAIN_URI_@}"; do + n=${n#DOMAIN_URI_} + libvirt_domains[$n]=$n + config_domains[$n]=$n + done + + # Then override libvirt_domains for each ID with an explicit DOMAIN_NAME, + # and add a config_domains reverse mapping for the real libvirt guest name. + for n in "${!DOMAIN_NAME_@}"; do + k=${!n} + n=${n#DOMAIN_NAME_} + libvirt_domains[$n]=$k + config_domains[$k]=$n + done + + if (( verbose > 3 )); then + local s=" " + for n in "${!libvirt_domains[@]}"; do + echo " config 'DOMAIN_*_$n' ${s:0:15-${#n}}-> libvirt domain ${libvirt_domains[$n]}" + done + for n in "${!config_domains[@]}"; do + echo " domain name '$n' ${s:0:19-${#n}}-> config DOMAIN_*_${config_domains[$n]}" + done + fi +} + # Device array indices DA_STRIDE=9 @@ -158,7 +235,7 @@ done < <( "$seedd" --shell-mr ) if (( verbose > 3 )); then - printf "seedd reported:" + printf "seedd reported devices:" printf " '%s'" "${all_devices[@]}" printf "\n" fi @@ -168,7 +245,7 @@ # get_device_by index match [index match ...] get_device_by() { - compare=( "$@" ) + local i j compare=( "$@" ) selected_device=() for (( i = 0; i < ${#all_devices[@]}; i += DA_STRIDE )); do @@ -190,25 +267,37 @@ done } -# Do $action for each available device assigned to $domain (with $propagate_opts) +# Do $action for each available device assigned to domain $1 (with $propagate_opts) act_on_all_devices_in_domain() { - devs="DOMAIN_RNG_${domain}[@]" + local config_domain=${config_domains[$1]} + local devs="DOMAIN_RNG_${config_domain}[@]" + local dev + + verb 3 "" + verb 3 "${action^}ing all devices for domain '$1' (config '$config_domain')" + + if [ -z "$config_domain" ]; then + verb 1 "Domain '$1' has no devices assigned in '$config_file'." + return + fi for dev in "${!devs}"; do - verb 4 "VM $domain, $action device $dev" + verb 4 "Checking for device '$dev'" get_device_by "$DA_SERIAL" "$dev" if (( ${#selected_device[@]} == DA_STRIDE )); then - exec_opts=( "$action" "$dev" -D "$domain" ) + exec_opts=( "$action" "$dev" -D "$config_domain" ) exec_opts+=( -b "${selected_device[$DA_BUSNUM]}" ) exec_opts+=( -d "${selected_device[$DA_DEVNUM]}" ) exec_opts+=( "${propagate_opts[@]}" ) verb 2 "$0 ${exec_opts[*]}" "$0" "${exec_opts[@]}" + else + verb 2 "Failed to find device '$dev'." fi done } @@ -218,7 +307,7 @@ # with all the necessary options for each device in the requested domain(s). 
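
The 'name' rule quoted above is the load-bearing part of both this helper and the identical copy added to the qemu-hook further down: anything other than ASCII letters, digits and underscores has to be rejected before it is spliced into an indirect ${!var} expansion. Purely as an illustration of what that test accepts and rejects (not code the package ships), here is the same character rule as a self-contained C++ checker:

    // Illustration only: the same "valid name suffix" rule as the bash test
    // above, i.e. a non-empty string of ASCII letters, digits or underscores.
    // A leading digit is tolerated here for the same reason given above: the
    // result is always appended to a valid prefix such as DOMAIN_RNG_.
    #include <cassert>
    #include <string>

    static bool is_valid_as_variable_name( const std::string &s )
    {
        static const char allowed[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                                      "abcdefghijklmnopqrstuvwxyz"
                                      "0123456789_";

        return ! s.empty() && s.find_first_not_of( allowed ) == std::string::npos;
    }

    int main()
    {
        assert(   is_valid_as_variable_name( "sid" ) );
        assert(   is_valid_as_variable_name( "kbsd" ) );
        assert( ! is_valid_as_variable_name( "kbsd-unstable" ) );   // '-' is not legal
        assert( ! is_valid_as_variable_name( "" ) );
        return 0;
    }
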
if [ -n "$do_all" ]; then - import_domain_config + map_domain_names get_available_devices propagate_opts=( ${config_file:+ -C "$config_file"} ) @@ -227,8 +316,9 @@ # Propagate verbose flags up to -vvvv (the maximum level we actually use), # accounting for the fact that dry_run bumps the verbosity level too. + v='' for (( i = ${dry_run:-0}; i < verbose; ++i )); do - v+="v" + v+='v' done propagate_opts+=( ${v:+ "-${v:0:4}"} ) @@ -236,14 +326,13 @@ if [ -n "$domain" ]; then # Act on all the devices configured for the given domain - act_on_all_devices_in_domain + act_on_all_devices_in_domain "$domain" else # Act on all the devices configured for all domains for dom in "${!DOMAIN_RNG_@}"; do - domain=${dom#DOMAIN_RNG_} - act_on_all_devices_in_domain + act_on_all_devices_in_domain "${dom#DOMAIN_RNG_}" done fi @@ -326,24 +415,29 @@ fi -# Build an index mapping device serial numbers to domain names. +# Build an index mapping device serial numbers to (config) domain names. map_devices_to_domains() { import_domain_config + declare -gA domains + local dom dev devs + + verb 4 "Mapping device serial numbers to domain identifiers:" + for dom in "${!DOMAIN_RNG_@}"; do - verb 4 "domain: $dom" + verb 4 " config: $dom" devs="${dom}[@]" for dev in "${!devs}"; do - verb 4 " dev: $dev" + verb 4 " dev: $dev" domains[$dev]=${dom#DOMAIN_RNG_} done done if (( verbose > 2 )); then for dev in "${!domains[@]}"; do - echo "device $dev is in domain ${domains[$dev]}" + echo " device $dev is in domain ${domains[$dev]}" done fi } @@ -355,7 +449,6 @@ # passing through to a VM. if [ -z "$domain" ]; then - declare -A domains map_devices_to_domains domain=${domains[$devserial]} @@ -364,11 +457,37 @@ verb 1 "Device '$devserial' is not assigned to any domain." exit 0 fi + + # We know the serial number lookup will return the config ID, so we can get + # the libvirt domain by just checking if DOMAIN_NAME_* was set for it too. + name_config="DOMAIN_NAME_$domain" + libvirt_domain=${!name_config:-$domain} + config_domain=$domain + +else + + # Find the config ID and libvirt domain name to use. If we don't have any + # mapping for the given $domain, then just use that name verbatim, since we + # could be here because someone is manually attaching or detaching a device + # to a libvirt domain which isn't included in the config file definitions, + # and that is an ok thing to be doing if complete automation isn't needed. + map_domain_names + + libvirt_domain=${libvirt_domains[$domain]:-$domain} + config_domain=${config_domains[$domain]} + + # Check if it's safe to fall back to assuming $domain for this one. + [ -n "$config_domain" ] || ! is_valid_as_variable_name "$domain" || config_domain="$domain" fi +verb 4 "Domain '$domain' => config '$config_domain', libvirt domain '$libvirt_domain'" + + # Check if we need to pass an explicit --connect URI to virsh. -if [ -z "$uri" ]; then - uri_config="DOMAIN_URI_$domain" +# The $config_domain should already be validated, so we could just check if +# it is not empty here, but it doesn't hurt to apply the full test here too. +if [ -z "$uri" ] && is_valid_as_variable_name "$config_domain"; then + uri_config="DOMAIN_URI_$config_domain" uri=${!uri_config} fi @@ -402,7 +521,7 @@ } -opts=( ${uri:+ -c "$uri"} "$action-device" "$domain" ) +opts=( ${uri:+ -c "$uri"} "$action-device" "$libvirt_domain" ) # Tell them what we are going to do. 
verb 1 "$virsh ${opts[*]} --live" diff -Nru bit-babbler-0.7/libvirt/qemu-hook bit-babbler-0.8/libvirt/qemu-hook --- bit-babbler-0.7/libvirt/qemu-hook 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/libvirt/qemu-hook 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,6 @@ #!/bin/bash # This file is distributed as part of the bit-babbler package. -# Copyright 2015 - 2017, Ron +# Copyright 2015 - 2018, Ron # # Example libvirt QEMU hook for cold-plugging BitBabbler devices into # newly started virtual machines. To use this, it must be installed @@ -40,13 +40,64 @@ done +# Test if a string is valid to use in a constructed variable name. +# We need to explicitly check this to avoid having "undefined" but wrong things +# happen if we dereference an invalid indirect variable name. A "name" in bash +# is defined as: +# +# 'A word consisting only of alphanumeric characters and underscores, and +# beginning with an alphabetic character or an under‐score.' +# +# With an implicit assumption that all those characters are also only ASCII. +# We don't need to validate that the first character isn't a digit here, because +# we know we will always be appending this to a valid prefix string before use. +# We do want to validate that it's not an empty string though. +is_valid_as_variable_name() +{ + # If we could be sure this would only run with bash 4.3 or later, then + # we could use 'shopt -s globasciiranges' and drop the [:ascii:] test, + # but Wheezy still has bash 4.2 - alternatively we could force use of the + # C locale here to avoid having non-ascii characters collated into the + # range a-z, but not being locale agnostic is ugly, so just test against + # the :ascii: character class explicitly. + [[ -n $1 && $1 != *[^a-zA-Z0-9_]* && $1 != *[^[:ascii:]]* ]] +} + +# Find the shell-friendly "config name" for the given libvirt domain name. +# If the guest name contains unicode characters, or anything else which would +# make it illegal to use as part of a bash variable name (like a '-'), then +# it needs to be explicitly mapped to a valid identifier with a DOMAIN_NAME_* +# declaration in the config file. +get_config_for_guest() +{ + for n in "${!DOMAIN_NAME_@}"; do + if [ "${!n}" = "$1" ]; then + config_name=${n#DOMAIN_NAME_} + return + fi + done + + if is_valid_as_variable_name "$1"; then + config_name=$1 + else + #echo "Invalid config name '$1'" + config_name='' + fi +} + if [ "$operation" = "started" ]; then - devices="DOMAIN_RNG_${guest_name}[@]" - opts=( -c change -s usb -a "idVendor=0403" -a "idProduct=7840" ) - for d in "${!devices}"; do - "$UDEVADM" trigger "${opts[@]}" -a "serial=$d" - done + get_config_for_guest "$guest_name" + + if [ -n "$config_name" ]; then + devices="DOMAIN_RNG_${config_name}[@]" + opts=( -c change -s usb -a "idVendor=0403" -a "idProduct=7840" ) + + for d in "${!devices}"; do + "$UDEVADM" trigger "${opts[@]}" -a "serial=$d" + done + fi + fi # Always return success here, we don't want to abort guest operations. diff -Nru bit-babbler-0.7/libvirt/vm.conf bit-babbler-0.8/libvirt/vm.conf --- bit-babbler-0.7/libvirt/vm.conf 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/libvirt/vm.conf 2018-02-07 23:56:52.000000000 +0000 @@ -6,6 +6,14 @@ # to be passed through to. Any number of devices and libvirt domains can be # configured here. # +# DOMAIN_NAME_="" +# - Provides a mapping from to the real libvirt domain name when that +# name contains unicode, or other special characters which aren't legal to +# use in a shell variable name (i.e. 
anything except ascii A-Z, a-z, 0-9 or +# and underscore). If this is not specified, then the default is to assume +# that is the literal libvirt domain name which the devices are to be +# assigned to by DOMAIN_RNG_. +# # DOMAIN_URI_="" # - Is the optional --connect URI passed to virsh(1) for domain . # If not specified, then the system default for virsh (for the user that @@ -25,6 +33,7 @@ #DOMAIN_URI_sid="qemu:///system" #DOMAIN_RNG_sid=( KWIF4Q JAXJE6 ) +#DOMAIN_NAME_kbsd="kbsd-unstable" #DOMAIN_URI_kbsd="qemu+ssh://your.domain/system" #DOMAIN_RNG_kbsd=( G2SJ3Z ) diff -Nru bit-babbler-0.7/Makeup/ac-fragments/configure.stdtools bit-babbler-0.8/Makeup/ac-fragments/configure.stdtools --- bit-babbler-0.7/Makeup/ac-fragments/configure.stdtools 2017-05-21 09:21:57.000000000 +0000 +++ bit-babbler-0.8/Makeup/ac-fragments/configure.stdtools 2018-02-01 09:59:14.000000000 +0000 @@ -1,6 +1,6 @@ dnl Makeup configure boilerplate. dnl -dnl Copyright 2003 - 2016, Ron +dnl Copyright 2003 - 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl @@ -19,13 +19,94 @@ CXXFLAGS=${CXXFLAGS-} +dnl These are separately precious because overriding {C,CXX}FLAGS should not +dnl normally mask the C/C++ standard that a project is built with, and that +dnl might not be an immediately obvious consequence of setting them explictly. +dnl If you really want to override that, do it with these (or by changing the +dnl PACKAGE_{C,XX}STD set for the project), which likewise will also preserve +dnl whatever other compiler flags would normally be used. +AC_ARG_VAR([C_STANDARD], [flags to set the compiler C standard to use]) +AC_ARG_VAR([CXX_STANDARD], [flags to set the compiler C++ standard to use]) + + +dnl Not all platforms have GCC as their default compiler anymore, even if it is +dnl still available by default. Autoconf still prefers to use GCC by default +dnl in the AC_PROG_{CC,CXX} tests though. These variables let the search order +dnl be explicitly specified by the user, and let us automatically tweak it for +dnl different platforms. +AC_ARG_VAR([CC_SEARCH], [space separated list of which C compiler to prefer]) +AC_ARG_VAR([CXX_SEARCH], [space separated list of which C++ compiler to prefer]) + +# Oddly enough, the most preferred compiler is a platform specific thing, not a +# universal truth. Who could have guessed ... + +dnl Keeping this list current with the changing Winds of Whim could become a +dnl rather tedious and fragile thing, so it's tempting to default to checking +dnl for cc and c++ first everywhere, on the assumption that all modern systems +dnl now have that as an alias to their actually preferred toolchains, but that +dnl has the downside of making it less obvious exactly which compiler is being +dnl used, and making it even more fragile if some user has changed it from what +dnl the normal platform default would otherwise be ... So let's see how this +dnl goes for a while. At present the platform needing this most is OpenBSD, +dnl since it still ships an ancient "last of the GPLv2" gcc in its base set, +dnl but actually has clang as its default and preferred compiler. +case $host in + *-*-openbsd* ) + dnl OpenBSD (as of 6.2) still has GCC 4.2.1 installed in its base set, + dnl but "defaults" to clang (which is what /usr/bin/cc points to), so + dnl test for a working clang before gcc there. 
+ AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="clang gcc cc"]) + AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="clang++ g++ c++"]) + ;; + + * ) + dnl By default, do what autoconf would otherwise do and prefer GCC, + dnl except our second choice is clang (which it entirely ignores), + dnl and we don't bother looking for the obscure C++ compilers which + dnl it would check for if it doesn't find g++ or c++. When someone + dnl proves they want them, and that they can compile our code, then + dnl we can revise this list to add them. + dnl + dnl Ideally, we'd have defaulted to calling AC_PROG_{CC,CXX} with an + dnl empty argument, and just let it do its own default thing, but that + dnl macro is too broken to enable that, it checks if the argument is + dnl empty during the m4 pass, so it considers an empty variable to be + dnl an explicit list (and then fails at runtime with no compilers to + dnl check) - and we can't AS_IF it and call it either with or without + dnl arguments at runtime, because there are tests in there which will + dnl only expand once, and so everything falls apart when they are only + dnl expanded in the dead branch ... The assumption that it will only + dnl ever appear once in one code path goes deep there. + AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="gcc clang cc"]) + AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="g++ clang++ c++"]) + ;; +esac + + # Check standard tools. -AC_PROG_CC +AC_PROG_CC([$CC_SEARCH]) AC_PROG_CPP -AC_PROG_CXX +AC_PROG_CXX([$CXX_SEARCH]) AC_PROG_CXXCPP +dnl If we explicitly set the C/C++ standard to use, then ensure that is passed +dnl when the preprocessor is run during the tests that follow. This is a bit +dnl sketchy, because really this ought to be done as part of testing for how to +dnl run the preprocessor above - and there are no separate variables for the +dnl preprocessor flags for C and C++, the autoconf tests just use CPPFLAGS for +dnl both, which is a bit difficult when we want to specify a C or C++ standard +dnl to use in mixed code. If we don't do this though, then we can see dodgy or +dnl misleading test results for things like AC_CHECK_HEADERS which runs both +dnl the compiler and preprocessor as separate tests. If CFLAGS or CXXFLAGS set +dnl a standard to use and the preprocessor flags do not, then the results could +dnl be conflicting when things which do vary according to the standard that is +dnl being used are involved. Fortunately, CPP and CXXCPP generally aren't used +dnl very often outside of the feature tests here, and if there is a problem it +dnl will probably shake out fairly early in the first test which does use it. +AS_IF([test -n "$C_STANDARD"],[CPP="$CPP $C_STANDARD"]) +AS_IF([test -n "$CXX_STANDARD"],[CXXCPP="$CXXCPP $CXX_STANDARD"]) + AC_PROG_LEX dnl AC_PROG_YACC dnl Do this instead since we want real bison usually, @@ -44,31 +125,6 @@ AC_CHECK_PROGS([GENHTML],[genhtml],[:]) -PTHREAD_CPPFLAGS="-pthread" -PTHREAD_LDFLAGS="-pthread" - -save_CPPFLAGS=$CPPFLAGS -CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" - -AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [ac_cv_have_reentrant], - [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ -#ifndef _REENTRANT -#error "_REENTRANT was not defined" -#endif - ]]) - ], - [ac_cv_have_reentrant=yes], - [ac_cv_have_reentrant=no] - )] -) - -CPPFLAGS=$save_CPPFLAGS - -AS_IF([test "$ac_cv_have_reentrant" != "yes"],[ - PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" -]) - - # Check standard args. AC_ARG_ENABLE([pipe], @@ -234,10 +290,7 @@ dnl These may all go in platform dependent conditionals one day. 
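
A note on the -pthread/_REENTRANT probe removed a few hunks above: it is only being moved, the same check is added back further down after the new warning tests. What it actually does amounts to preprocessing a translation unit like the one below with -pthread in CPPFLAGS, and only if that fails is -D_REENTRANT appended to PTHREAD_CPPFLAGS.

    // What the AC_PREPROC_IFELSE check boils down to.  Built with
    // "g++ -pthread" this compiles cleanly on platforms where -pthread
    // already implies -D_REENTRANT; where it does not, the #error fires
    // and configure adds -D_REENTRANT explicitly.
    #ifndef _REENTRANT
    #error "_REENTRANT was not defined"
    #endif

    int main() { return 0; }
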
-dnl Note that we disable Wformat-nonliteral from Wformat=2 because -dnl it barks about the Emerald::format_str<> template that we -dnl use to output generic types in some places. -dnl And we no longer use -Wconversion. It was mainly designed as +dnl We no longer use -Wconversion. It was mainly designed as dnl a filter for porting old C code where parameters may be type dnl promoted in the absence of a new style prototype, and should dnl not be used to check new code. False positives in the xlocale @@ -245,7 +298,7 @@ dnl make it officially more trouble than it is presently worth. cc_warnings=" -Wall" cc_fail_on_warn=" -Werror" -cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wno-format-nonliteral -Wfloat-equal" +cc_extra_warnings=" -W -Wpointer-arith -Wcast-qual -Wcast-align -Wformat=2 -Wfloat-equal" cc_optimise=" -O2" cc_profile=" -pg" @@ -288,10 +341,49 @@ CFLAGS=${CFLAGS:-$cc_flags$c_flags} CXXFLAGS=${CXXFLAGS:-$cc_flags$cxx_flags} +AS_IF([test -n "$C_STANDARD"],[CFLAGS="$C_STANDARD $CFLAGS"]) +AS_IF([test -n "$CXX_STANDARD"],[CXXFLAGS="$CXX_STANDARD $CXXFLAGS"]) + # add 's' here and omit ranlib from the build step ARFLAGS=rDvs +dnl We need to test if these extra warnings are actually supported by the +dnl toolchain in use, we can't safely assume that it does with this lot. +dnl The -Wsuggest-attribute options are currently GCC specific. +AS_IF([test "$ac_cv_enable_extra_warnings" = yes],[ + ACM_ADD_COMPILER_WARNING([C,CXX],[suggest-attribute=format, + suggest-attribute=const, + suggest-attribute=pure, + suggest-attribute=noreturn]) +]) + + +PTHREAD_CPPFLAGS="-pthread" +PTHREAD_LDFLAGS="-pthread" + +save_CPPFLAGS=$CPPFLAGS +CPPFLAGS="$CPPFLAGS $PTHREAD_CPPFLAGS" + +AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [ac_cv_have_reentrant], + [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ +#ifndef _REENTRANT +#error "_REENTRANT was not defined" +#endif + ]]) + ], + [ac_cv_have_reentrant=yes], + [ac_cv_have_reentrant=no] + )] +) + +CPPFLAGS=$save_CPPFLAGS + +AS_IF([test "$ac_cv_have_reentrant" != "yes"],[ + PTHREAD_CPPFLAGS="$PTHREAD_CPPFLAGS -D_REENTRANT" +]) + + dnl bison3 complains loudly about a bunch of constructs that must still be used dnl if compatibility with bison2 is required, and appears to give us no clean dnl way to deal with that at all. We can tell bison3 not to bark by passing it diff -Nru bit-babbler-0.7/Makeup/ac-fragments/makeup.m4 bit-babbler-0.8/Makeup/ac-fragments/makeup.m4 --- bit-babbler-0.7/Makeup/ac-fragments/makeup.m4 2017-05-21 09:21:57.000000000 +0000 +++ bit-babbler-0.8/Makeup/ac-fragments/makeup.m4 2018-02-01 09:59:14.000000000 +0000 @@ -1,6 +1,6 @@ dnl Makeup aclocal macros. dnl -dnl Copyright 2003 - 2016, Ron +dnl Copyright 2003 - 2017, Ron dnl dnl These macros are distributed under the terms of the GNU GPL version 2. dnl @@ -179,9 +179,9 @@ AC_CONFIG_COMMANDS([Makefile], [ cat > Makefile < +# Copyright 2003 - 2017, Ron # # This file is distributed under the terms of the GNU GPL version 2. # @@ -303,7 +303,7 @@ cat > $_TEMPFILE < + * Copyright 2003 - 2017, Ron * * This file is distributed under the terms of the GNU GPL version 2. * @@ -400,16 +400,27 @@ #endif -// Compiler version test. +// Compiler version tests. // // This macro will return false if the version of gcc in use // is earlier than the specified major, minor limit, or if gcc // is not being used. Otherwise it will evaluate to be true. 
-#define EM_COMPILER_GCC( major, minor ) \\ - ( defined(__GNUC__) && defined(__GNUC_MINOR__) \\ - && ( ( __GNUC__ > (major) ) \\ - || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) ) +// This will also be true for the clang compiler, for whatever +// GCC version it is pretending to be compatible with. +#if defined(__GNUC__) && defined(__GNUC_MINOR__) + #define EM_COMPILER_GCC( major, minor ) ( ( __GNUC__ > (major) ) \\ + || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) +#else + #define EM_COMPILER_GCC( major, minor ) 0 +#endif +// As above, except for the clang compiler instead. +#if defined(__clang_major__) && defined(__clang_minor__) + #define EM_COMPILER_CLANG( major, minor ) ( ( __clang_major__ > (major) ) \\ + || ( __clang_major__ == (major) && __clang_minor__ >= (minor) ) ) +#else + #define EM_COMPILER_CLANG( major, minor ) 0 +#endif #endif // ${_GUARD} @@ -468,3 +479,200 @@ ]) #~ + +#~ _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([FLAGS_PREFIX],[COMPILER_VAR]) +# +# Check if the compiler considers unknown warning options to be an error +# by default, or if it needs an explicit extra option passed to do so. +# This macro is an implementation detail required by ACM_ADD_COMPILER_WARNING. +# +# Testing whether a warning option is supported can be tricky. By default +# GCC will consider -Wfoo to be an error if 'foo' is an unknown warning, +# but it will not even emit a diagnostic for -Wno-foo unless some other +# diagnostic message is also triggered, in which case it will merely warn +# that an unrecognised option is also present. +# +# With the clang toolchain the behaviour in both cases is controlled by an +# explicit option: -Wunknown-warning-option, which is enabled by default. +# If that option is negated then no diagnostic is output, otherwise unknown +# warning options of either polarity will simply emit a warning. If we want +# to test whether a warning option is supported then we need to explicitly +# add unknown-warning-option to the -Werror set to provoke a test failure +# if it is not. +# +# The FLAGS_PREFIX determines which language will be tested and so which of +# the *FLAGS variables the test options will be added to. Supported values +# are currently C and CXX. +# +# The COMPILER_VAR is only used to report the toolchain being tested, so for +# C it should be [$CC] and for C++ it should be [$CXX]. This parameter is +# passed as a convenience, since there is no strictly consistent rule which +# maps all the related identifiers for a language together, and the caller +# should already know it, so we don't need extra logic here to look it up. +# +# The output variable ACM_${FLAGS_PREFIX}_WARNINGFAIL will be set to either +# an empty string or the additional option(s) which need to be set in the +# relevant *FLAGS when testing whether some warning option is supported. +# +# This macro shouldn't normally be invoked directly, instead the language +# specific wrappers which don't need options (but whose name can still be +# constructed from other macros) should be AC_REQUIRE'd before the output +# variable is needed for the first time. 
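
Back to the emconfig.h change a little above: splitting the version test into guarded #if blocks, and adding EM_COMPILER_CLANG(), means clang can at last be told apart from the GCC version it reports through __GNUC__. A hypothetical use is sketched below, with the macro definitions copied from the hunk above; the 4.6/3.1 cut-offs are purely illustrative and not values the project uses.

    // The two version-test macros as they land in the generated header,
    // plus one plausible consumer: guarding diagnostic push/pop pragmas of
    // the kind added around the libusb error switch earlier in this diff.
    #if defined(__GNUC__) && defined(__GNUC_MINOR__)
      #define EM_COMPILER_GCC( major, minor ) ( ( __GNUC__ > (major) ) \
                  || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) )
    #else
      #define EM_COMPILER_GCC( major, minor ) 0
    #endif

    #if defined(__clang_major__) && defined(__clang_minor__)
      #define EM_COMPILER_CLANG( major, minor ) ( ( __clang_major__ > (major) ) \
                  || ( __clang_major__ == (major) && __clang_minor__ >= (minor) ) )
    #else
      #define EM_COMPILER_CLANG( major, minor ) 0
    #endif

    #include <cstdio>

    int main()
    {
        // Clang defines __GNUC__ as well, so EM_COMPILER_GCC() is also true
        // there for whatever GCC version it claims compatibility with; use
        // EM_COMPILER_CLANG() when clang itself needs to be distinguished.
    #if EM_COMPILER_CLANG(3,1) || ( EM_COMPILER_GCC(4,6) && ! EM_COMPILER_CLANG(1,0) )
        std::printf( "diagnostic push/pop pragmas should be usable here\n" );
    #else
        std::printf( "older toolchain, avoid the newer pragmas\n" );
    #endif
        return 0;
    }
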
+# ---------------------------------------------------------------------------- + +AC_DEFUN([_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION], +[ +m4_case([$1], + [C],[AC_LANG_PUSH([C])], + [CXX],[AC_LANG_PUSH([C++])], + [m4_fatal([_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION: unknown toolchain type '$1'])]) + +acm_save_$1FLAGS="$$1FLAGS" +$1FLAGS="$$1FLAGS -Womg-wtf-not-an-option" + +ACM_$1_WARNINGFAIL="" + +AC_CACHE_CHECK([if $2 unknown warning options are errors],[mu_cv_$1_flag_uwo], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], + [mu_cv_$1_flag_uwo=no], + [mu_cv_$1_flag_uwo=yes] + ) + ]) +AS_IF([test "$mu_cv_$1_flag_uwo" = no],[ + $1FLAGS="$acm_save_$1FLAGS -Werror=unknown-warning-option -Womg-wtf-not-an-option" + AC_CACHE_CHECK([if $2 supports -Werror=unknown-warning-option],[mu_cv_$1_flag_werror_uwo], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], + [mu_cv_$1_flag_werror_uwo=no], + [mu_cv_$1_flag_werror_uwo=yes] + ) + ]) + dnl It should be safe to fail open here. If we haven't figured out how to make the + dnl compiler fail when passed an unknown warning option, then it should be relatively + dnl safe to let tests default to passing them anyway. At best, they will actually + dnl work as intended, and at worst it might make a lot of noise spitting out non-fatal + dnl warning diagnostics about not liking them - but it shouldn't break the build. + dnl We bark a warning here so that this test can be improved further if that occurs, + dnl and err on the side of including rather than excluding extra warnings. + AS_IF([test "$mu_cv_$1_flag_werror_uwo" = yes], + [ACM_$1_WARNINGFAIL=" -Werror=unknown-warning-option"], + [AC_MSG_WARN([Don't know how to make $2 fail with unknown warning options,]) + AC_MSG_WARN([so later tests may (wrongly) decide to pass them to it anyway.])]) + ]) + +$1FLAGS="$acm_save_$1FLAGS" + +m4_case([$1], + [C],[AC_LANG_POP([C])], + [CXX],[AC_LANG_POP([C++])], + [m4_fatal([_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION: unknown toolchain type '$1'])]) +]) +#~ + + +#~ _ACM_C_WERROR_UNKNOWN_WARNING_OPTION +# +# C language wrapper for _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION +# which can be AC_REQUIRE'd. +# ------------------------------------------------------------------- + +AC_DEFUN([_ACM_C_WERROR_UNKNOWN_WARNING_OPTION], +[ +_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([C],[$CC]) +]) +#~ + + +#~ _ACM_CXX_WERROR_UNKNOWN_WARNING_OPTION +# +# C++ language wrapper for _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION +# which can be AC_REQUIRE'd. +# --------------------------------------------------------------------- + +AC_DEFUN([_ACM_CXX_WERROR_UNKNOWN_WARNING_OPTION], +[ +_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([CXX],[$CXX]) +]) +#~ + + +#~ __ACM_ADD_COMPILER_WARNING([FLAGS_PREFIX],[WARNING_OPTION],[CACHE_VAR_SUFFIX]) +# +# Implementation of _ACM_ADD_COMPILER_WARNING for doing the individial tests of +# each of the specified WARNING_OPTIONS. The correct default language should be +# already set, so here we test if -W${WARNING_OPTION} is supported, caching the +# result of that test in mu_cv_${FLAGS_PREFIX}_flag_${CACHE_VAR_SUFFIX}, and +# appending any supported warning options to ${FLAGS_PREFIX}FLAGS. +# ------------------------------------------------------------------------------ + +AC_DEFUN([__ACM_ADD_COMPILER_WARNING], +[ +acm_save_$1FLAGS="$$1FLAGS" +$1FLAGS="$$1FLAGS$ACM_$1_WARNINGFAIL -W$2" + +dnl We need to special case C => $CC here, but CXX => $CXX can be implicit. 
+AC_CACHE_CHECK([if m4_case([$1],[C],[$CC],[$$1]) supports -W$2],[mu_cv_$1_flag_$3], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], + [mu_cv_$1_flag_$3=yes], + [mu_cv_$1_flag_$3=no] + ) + ]) +AS_IF([test "$mu_cv_$1_flag_$3" = yes], + [$1FLAGS="$acm_save_$1FLAGS -W$2"], + [$1FLAGS="$acm_save_$1FLAGS"]) +]) +#~ + + +#~ _ACM_ADD_COMPILER_WARNING([FLAGS_PREFIX],[WARNING_OPTIONS]) +# +# Implementation of ACM_ADD_COMPILER_WARNING for doing the individial tests with +# each of the specified FLAGS_PREFIXES. This will temporarily switch the default +# language based on FLAGS_PREFIX, then test if each of the comma-separated list +# of WARNING_OPTIONS is supported for that language. +# ------------------------------------------------------------------------------ + +AC_DEFUN([_ACM_ADD_COMPILER_WARNING], +[ +dnl We do this check before the AC_REQUIRE below, because the most likely cause +dnl of this failing is a typo in user code invoking ACM_ADD_COMPILER_WARNING +dnl and this gives a more user friendly warning pointing to the correct place +dnl when the m4 is being processed by aclocal/autom4te, rather than having that +dnl propagate deeper into the implementation detail before being caught. +dnl +dnl But it will not change the default toolchain for the invocation of the +dnl requirement, since that gets expanded outside of the scope of the push/pop +dnl used here, so it will need to do this itself as well to be run with the +dnl correct toolchain. +m4_case([$1], + [C],[AC_LANG_PUSH([C])], + [CXX],[AC_LANG_PUSH([C++])], + [m4_fatal([ACM_ADD_COMPILER_WARNING: unknown toolchain type '$1'])]) + +AC_REQUIRE([_ACM_$1_WERROR_UNKNOWN_WARNING_OPTION]) + +m4_foreach([opt],[$2],[__ACM_ADD_COMPILER_WARNING([$1],opt,m4_tolower(m4_bpatsubst(opt,[[^A-Za-z0-9]],[_])))]) + +m4_case([$1], + [C],[AC_LANG_POP([C])], + [CXX],[AC_LANG_POP([C++])], + [m4_fatal([ACM_ADD_COMPILER_WARNING: unknown toolchain type '$1'])]) +]) +#~ + + +#~ ACM_ADD_COMPILER_WARNING([FLAGS_PREFIXES],[WARNING_OPTIONS]) +# +# For each combination of the comma-separated FLAGS_PREFIXES and WARNING_OPTIONS +# check if the corresponding compiler supports -W${WARNING_OPTION} and if it does +# add it to the *FLAGS for that language for use when compiling subsequent source. +# +# Currently supported values for FLAGS_PREFIXES are C and CXX (use [C,CXX] to +# test and set options for both the C and C++ compiler). +# ---------------------------------------------------------------------------- + +AC_DEFUN([ACM_ADD_COMPILER_WARNING], +[ +m4_foreach([lang],[$1],[_ACM_ADD_COMPILER_WARNING(lang,[$2])]) +]) +#~ + diff -Nru bit-babbler-0.7/Makeup/config/configure.bit-babbler bit-babbler-0.8/Makeup/config/configure.bit-babbler --- bit-babbler-0.7/Makeup/config/configure.bit-babbler 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/Makeup/config/configure.bit-babbler 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,6 @@ dnl Makeup extra configuration for bit-babbler. dnl -dnl Copyright 2003 - 2017, Ron Lee. +dnl Copyright 2003 - 2018, Ron Lee. dnl AC_LANG_PUSH([C++]) @@ -12,6 +12,10 @@ ;; *-*-cygwin* | *-*-mingw32* ) + dnl We don't have unix domain sockets on windows, so default to TCP there. 
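
None of the hunks here show the code that consumes the new SEEDD_CONTROL_SOCKET default (only the configure side, which exports it via AC_DEFINE_UNQUOTED a little further down), so the following is just a hypothetical sketch of the usual pattern for a baked-in string like this: a compile-time fallback which a run-time option can override. The option handling shown is an assumption for illustration; the /run path matches the Linux default chosen below.

    // Hypothetical consumer of the configure-time default.  In the real tree
    // the macro would come from the generated config header rather than the
    // fallback #define used here to keep the sketch self-contained.
    #include <cstdio>
    #include <string>

    #ifndef SEEDD_CONTROL_SOCKET
    #define SEEDD_CONTROL_SOCKET "/run/bit-babbler/seedd.socket"
    #endif

    int main( int argc, char *argv[] )
    {
        // A command line or config file option would normally override this.
        std::string control_socket = argc > 1 ? argv[1] : SEEDD_CONTROL_SOCKET;

        std::printf( "control socket: %s\n", control_socket.c_str() );
        return 0;
    }
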
+ AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=tcp:localhost:56789]) + dnl We need at least 0x0600 to get AI_ADDRCONFIG for getaddrinfo ac_cv_env_winver=0x0600 ac_cv_env__win32_winnt=0x0600 @@ -30,19 +34,27 @@ ;; *-*-openbsd* ) - AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], - [Workaround OpenBSD _thread_flockfile cancellation bug]) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) dnl The default pthread stack size on OpenBSD 6.1 is 512kB, so fix that. - THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) + + AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], + [Workaround OpenBSD _thread_flockfile cancellation bug]) ;; *-*-freebsd* ) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) + dnl The default pthread stack size on FreeBSD 11 is 2MB, so fix that. dnl So far we haven't actually had this smash the stack there with dnl the default size (unlike OpenBSD, MacOS and Windows), but let's dnl not wait until we do, just use the same size as everywhere else. - THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) dnl On FreeBSD 11, both gcc6 and gcc7 will miscompile code when the dnl -fguess-branch-probability optimisation is enabled (which it is @@ -119,13 +131,35 @@ ;; *-*-darwin* ) + dnl We don't have a /run directory by default, so use /var/run + AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/var/run/bit-babbler/seedd.socket]) + dnl The default pthread stack size on MacOS is only 512kB, and we expect to dnl need more than that, so bring it into line with the normal Linux default. - THREAD_STACK_SIZE=8192 + AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; esac +dnl /var could be a remote mount which isn't available at early boot when seedd +dnl is first started, but /run is supposed to be ready before any ordinary early +dnl boot process even if it is a separate mount like a tmpfs, so default to it +dnl unless we know it's not expected to be supported. FHS 3.0 allows /var/run +dnl to be an alias to /run, and that is what most (but not all) Linux distros +dnl currently do. The BSDs (aside from Debian's kFreeBSD port) aren't riding +dnl this train yet though, so we still use /var/run there instead of rudely +dnl creating a new directory in the root of people's systems. +AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], + [SEEDD_CONTROL_SOCKET=/run/bit-babbler/seedd.socket]) + +AC_ARG_VAR([SEEDD_CONTROL_SOCKET], [Set the default to use for the seedd control socket]) +AS_IF([test -n "$SEEDD_CONTROL_SOCKET"],[ + AC_DEFINE_UNQUOTED([SEEDD_CONTROL_SOCKET],["$SEEDD_CONTROL_SOCKET"], + [Set the default to use for the seedd control socket]) + ]) + + dnl Clang bitches about "GNU old-style field designator extension" in C++ code dnl even though the C99 designated initialiser style isn't supported by C++, dnl even as a GNU extension. 
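
The warning being hushed here is about the syntax difference shown below. This is only an illustration of the initialiser styles the comment is talking about; the struct and the values are made up, and the hypothetical Timeout type is not project code.

    // GNU old-style field designators: accepted as an extension by both the
    // GNU and clang C++ front ends, but clang++ warns about every use of it
    // unless told otherwise.
    struct Timeout { long sec; long nsec; };

    static Timeout a = { sec: 5, nsec: 0 };

    // C99 designated initialisers: fine in C, but not valid C++ until C++20,
    // so not an option for code built as gnu++98:
    //
    //     static Timeout b = { .sec = 5, .nsec = 0 };

    // The plain positional form is the portable C++98 alternative.
    static Timeout c = { 5, 0 };

    int main() { return a.sec + c.sec == 10 ? 0 : 1; }
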
We use the old initialiser style though, so hush @@ -456,7 +490,11 @@ AC_MSG_NOTICE([Configured bit-babbler $PACKAGE_VERSION]) -AC_MSG_NOTICE([ with udev: $ac_cv_with_udev]) +AC_MSG_NOTICE([ with udev: $ac_cv_with_udev]) +AC_MSG_NOTICE([ SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET]) +AS_IF([test -n "$THREAD_STACK_SIZE"],[ +AC_MSG_NOTICE([ THREAD_STACK_SIZE: $THREAD_STACK_SIZE]) +]) case $host in @@ -478,3 +516,4 @@ ;; esac +AC_CONFIG_FILES([munin/bit_babbler],[chmod +x munin/bit_babbler]) diff -Nru bit-babbler-0.7/Makeup/config/Package.conf bit-babbler-0.8/Makeup/config/Package.conf --- bit-babbler-0.7/Makeup/config/Package.conf 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/Makeup/config/Package.conf 2018-02-07 23:56:52.000000000 +0000 @@ -1,10 +1,11 @@ PACKAGE_NAME = bit-babbler -PACKAGE_VERSION = 0.7 +PACKAGE_VERSION = 0.8 PACKAGE_MAINTAINER = ron@debian.org +PACKAGE_CXXSTD = -std=gnu++98 PACKAGE_TESTS = configure.stdtools configure.i18n PACKAGE_CONFIG_HEADER = setup.h PACKAGE_DIST_TYPE = git PACKAGE_DIST_COMPRESS = gz PACKAGE_BUILD_ROOTCMD = fakeroot PACKAGE_INSTALL_ROOTCMD = sudo -PACKAGE_TARGETS = seedd bbctl bbcheck bbvirt vm-conf munin-script munin-conf man1 +PACKAGE_TARGETS = seedd seedd-conf seedd-service bbctl bbcheck bbvirt vm-conf munin-script munin-conf man1 diff -Nru bit-babbler-0.7/Makeup/config/target.munin-script bit-babbler-0.8/Makeup/config/target.munin-script --- bit-babbler-0.7/Makeup/config/target.munin-script 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/Makeup/config/target.munin-script 2018-02-07 23:56:52.000000000 +0000 @@ -1,6 +1,25 @@ munin-script_TYPE = DATA -munin-script_DATA_SRCDIR = $(srcdir)/munin +munin-script_DATA_SRCDIR = $(top_builddir)/munin munin-script_DATA_INSTALLDIR = $(datadir)/munin/plugins munin-script_DATA_FILES = bit_babbler INSTALL_DATA = $(INSTALL_PROGRAM) + +# Regenerate the plugin script if config.status or its source file changed. +$(top_builddir)/munin/bit_babbler: $(srcdir)/munin/bit_babbler.in $(top_builddir)/config.status + @echo -n " * Updating $@... " + @cd $(top_builddir) && ./config.status $@ + +# This one is a little unconventional. We don't usually remove files created +# by config.status in the normal clean target, but this one actually is a build +# target, not a part of the build system, and if we don't remove it here, then +# nothing else would either, aside from completely removing a build subdir that +# it was generated in. So nothing would clean it if it's configured in-tree. 
+do_clean_munin-script: + @echo " Removing: munin/bit_babbler" + @$(RM) "$(top_builddir)/munin/bit_babbler" + @rmdir $(top_builddir)/munin 2>/dev/null || true + +clean_munin-script: do_clean_munin-script + +.PHONY: do_clean_munin-script diff -Nru bit-babbler-0.7/Makeup/config/target.seedd-conf bit-babbler-0.8/Makeup/config/target.seedd-conf --- bit-babbler-0.7/Makeup/config/target.seedd-conf 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/Makeup/config/target.seedd-conf 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,4 @@ +seedd-conf_TYPE = DATA +seedd-conf_DATA_SRCDIR = $(srcdir)/doc/examples +seedd-conf_DATA_INSTALLDIR = /etc/bit-babbler +seedd-conf_DATA_FILES = seedd.conf diff -Nru bit-babbler-0.7/Makeup/config/target.seedd-service bit-babbler-0.8/Makeup/config/target.seedd-service --- bit-babbler-0.7/Makeup/config/target.seedd-service 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/Makeup/config/target.seedd-service 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,4 @@ +seedd-service_TYPE = DATA +seedd-service_DATA_SRCDIR = $(srcdir)/doc/examples +seedd-service_DATA_INSTALLDIR = /lib/systemd/system +seedd-service_DATA_FILES = seedd.service seedd-wait.service diff -Nru bit-babbler-0.7/Makeup/gmake-fragments/makefile.makeup bit-babbler-0.8/Makeup/gmake-fragments/makefile.makeup --- bit-babbler-0.7/Makeup/gmake-fragments/makefile.makeup 2017-05-21 09:21:57.000000000 +0000 +++ bit-babbler-0.8/Makeup/gmake-fragments/makefile.makeup 2018-02-01 09:59:14.000000000 +0000 @@ -169,7 +169,7 @@ echo '#' >> $(1); \ echo '# Copyright 2003 - 2017, Ron ' >> $(1); \ echo >> $(1); \ - echo 'MAKEUP_VERSION = 0.31' >> $(1); \ + echo 'MAKEUP_VERSION = 0.32' >> $(1); \ echo '#MAKEUP_VERBOSE = yes' >> $(1); \ echo >> $(1); \ echo 'MAKEUP_DIR = $$(top_srcdir)/Makeup' >> $(1); \ diff -Nru bit-babbler-0.7/Makeup/Makeup.conf bit-babbler-0.8/Makeup/Makeup.conf --- bit-babbler-0.7/Makeup/Makeup.conf 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/Makeup/Makeup.conf 2018-02-07 23:56:52.000000000 +0000 @@ -2,7 +2,7 @@ # # Copyright 2003 - 2017, Ron -MAKEUP_VERSION = 0.31 +MAKEUP_VERSION = 0.32 #MAKEUP_VERBOSE = yes MAKEUP_DIR = $(top_srcdir)/Makeup diff -Nru bit-babbler-0.7/munin/bit_babbler bit-babbler-0.8/munin/bit_babbler --- bit-babbler-0.7/munin/bit_babbler 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/munin/bit_babbler 1970-01-01 00:00:00.000000000 +0000 @@ -1,1140 +0,0 @@ -#!/usr/bin/perl -w - -# This file is distributed as part of the bit-babbler package. -# Copyright 2014 - 2015, Ron - -# Munin magic markers -#%# family=auto -#%# capabilities=autoconf - -use strict; - -use IO::Socket; -use JSON::XS; -use Munin::Plugin; - -my $control_socket = $ENV{'control_socket'} || "/var/run/bit-babbler/seedd.socket"; -my $json; - - -sub cmd_request($) -{ #{{{ - - my $request = shift; - - my $sock = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Peer => $control_socket - ) or die "Could not create socket: $!\n"; - - my $max_chunk_size = 65536; - my $data; - my $msg; - my $flags; - - $sock->send('"' . $request . 
"\"\0") or die "Failed to send '$request' request: $!\n"; - do { - $sock->recv($data,$max_chunk_size,$flags) or die "Failed to read reply: $!\n"; - $msg .= $data; - } - while( $data !~ /\0/ ); - - $json = eval { JSON::XS->new->decode($msg) }; - die "JSON decode failed: $@: $msg\n" if $@; - - if ($json->[0] ne $request) { - die "Unrecognised reply: $json->[0]\n"; - } - -} #}}} - -sub get_ids() -{ - cmd_request("GetIDs"); -} - -sub get_stats() -{ - cmd_request("ReportStats"); -} - -sub unique_list(@) { - my %h; - map { $h{$_}++ ? () : $_ } @_; -} - - -sub report_bitrate_config(@) -{ #{{{ - - print "multigraph bb_bitrate\n"; - print "graph_title BitBabbler bytes output\n"; - print "graph_vlabel Bytes/second\n"; - print "graph_category system\n"; - - for (@_) { - my $f = clean_fieldname($_); - - print "${f}_qa_passed.label $_\n"; - print "${f}_qa_passed.type COUNTER\n"; - print "${f}_qa_passed.max 1000000\n"; - print "${f}_qa_passed.info Good entropy output\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_bitrate.output_$f\n"; - print "graph_title BitBabbler $_ bytes output\n"; - print "graph_vlabel Bytes/second\n"; - print "graph_category system\n"; - print "graph_info This graph shows the demand for entropy (and the rate at " - . "which it is able to be delivered). The discarded rate is entropy which " - . "was read from the device, but which was not used because either the " - . "QA checks are currently failing, or they are still confirming whether " - . "a failure was really a transient anomaly or not. It is not unusual to " - . "have some entropy discarded at first start up, since the QA checking " - . "puts the initial onus on the source to prove it is good, and will not " - . "pass entropy from it until it does. The passed rate includes entropy " - . "that is output even when none is being consumed, which is used to keep " - . "the pools constantly fresh.\n"; - - print "${f}_qa_passed.label Passed\n"; - print "${f}_qa_passed.type COUNTER\n"; - print "${f}_qa_passed.max 1000000\n"; - print "${f}_qa_passed.info Good entropy output\n"; - - print "${f}_qa_unpassed.label Discarded\n"; - print "${f}_qa_unpassed.type COUNTER\n"; - print "${f}_qa_unpassed.max 1000000\n"; - print "${f}_qa_unpassed.warning 1\n"; - print "${f}_qa_unpassed.info Discarded entropy\n"; - } - -} #}}} - -sub report_bitrate_values(@) -{ #{{{ - - print "multigraph bb_bitrate\n"; - for (@_) { - my $f = clean_fieldname($_); - my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; - - print "${f}_qa_passed.value " . ($qa ? $qa->{'BytesPassed'} : "U") . "\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; - - print "multigraph bb_bitrate.output_$f\n"; - - if (defined $qa) { - print "${f}_qa_passed.value $qa->{'BytesPassed'}\n"; - print "${f}_qa_unpassed.value " . ($qa->{'BytesAnalysed'} - $qa->{'BytesPassed'}) . "\n"; - } else { - print "${f}_qa_passed.value U\n"; - print "${f}_qa_unpassed.value U\n"; - } - } - -} #}}} - - -sub report_ent_config(@) -{ #{{{ - - # Check if the Ent test long statistics are expected to have converged on - # their threshold limits. We don't want to bark warnings here until then. - # - # This is a bit ugly, it means we fetch the stats used to output the values - # during both the config and the fetch phases. 
We could cache them during - # fetch, but that's backward and the first config run will potentially read - # stale data from the cache (triggering a burst of false warnings, which is - # exactly what this extra complication is here to solve ...). We could - # instead cache them here, and read that during fetch -- but it's not clear - # that's actually simpler or in any way more efficient than just querying - # for them twice ... We could also add a simpler request to the protocol, - # that only fetches the sample counts we need to inspect here, but that may - # be overengineering our way around this too. - # - # What we do here then, is check if Ent::Limits::long_minsamples has been - # reached yet for each device we are graphing, and flag each of them that - # have. We then use that to decide whether to include the warning limits - # in the config for each graph. This avoids the situation where devices - # that aren't having a lot of data read from them, and which may take many - # hours, or even days, to reach the long test convergence thresholds, will - # be reporting 'false' warnings each time the process is restarted until - # that finally happens. With this, the warnings will only trigger in the - # cases where the test is considered to have actually failed and the output - # from the device is being suppressed. Which is the only case we really - # want to alert the admin about. Crying wolf when the test is not actually - # valid yet will just lead someone to ignore a real failure - and relaxing - # the thresholds for warnings here to avoid that would mean a real failure - # might go unnoticed for a longer period than would be ideal, or be harder - # to pinpoint the real cause when some other metric alerts them to it. - # - # So it's better to be a little bit ugly here, than a lot ugly for users. - # This could be a lot easier if munin didn't split each request into two - # separate phases that both occur for every poll. - my %warn; - - get_stats(); - - for (keys %{$json->[2]}) { - my $f = clean_fieldname($_); - my $ent8 = $json->[2]{$_}{'Ent8'}; - my $ent16 = $json->[2]{$_}{'Ent16'}; - - $warn{$f}{'Ent8'} = 1 - if (defined $ent8 && $ent8->{'Long'}{'Samples'} > 250000000); - - $warn{$f}{'Ent16'} = 1 - if (defined $ent16 && $ent16->{'Long'}{'Samples'} > 500000000); - } - - - - print "multigraph bb_ent\n"; - print "graph_title BitBabbler Ent tests\n"; - print "graph_args --alt-autoscale --alt-y-grid\n"; - print "graph_vlabel Shannon entropy (per 8 bits)\n"; - print "graph_scale no\n"; - print "graph_printf %9.6lf\n"; - print "graph_category system\n"; - - for (@_) { - my $f = clean_fieldname($_); - - print "${f}_ent_entropy_short.label $_ short term\n"; - print "${f}_ent_entropy_short.info Short term entropy estimate\n"; - - print "${f}_ent_entropy_long.label $_ long term\n"; - print "${f}_ent_entropy_long.info Long term entropy estimate\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.chisq16_$f\n"; - print "graph_title BitBabbler $_ Chi^2 distribution (16-bit)\n"; - print "graph_args --alt-autoscale" - . " 'HRULE:66659.52#ffaaaa:Random will exceed 66659.52 less than 0.1% of the time'" - . " 'COMMENT: \\j'" - . " 'HRULE:64421.97#ffaaaa:Random will exceed 64421.97 more than 99.9% of the time'" - . " 'COMMENT: \\j'" - . "\n"; - print "graph_vlabel Chi^2\n"; - print "graph_scale no\n"; - print "graph_printf %8.2lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the results of Pearson's Chi-squared test " - . 
"for short and long sequences of 16-bit samples. The short term result is " - . "a test of the 100 million most recently generated samples. The long term " - . "result is computed over all samples generated since the process being " - . "queried began.

" - . "A statistically random sequence would be expected to exceed 64421.97 99.9% " - . "of the time, 64695.73 99% of the time, and 64940.64 95% of the time." - . " A Chi-squared statistic smaller than this indicates the sample values " - . "were more uniformly distributed than would normally be expected from a " - . "random selection.

" - . "At the opposite end of expectation, it is likely to exceed 66131.63 only " - . "5% of the time, 66380.17 1% of the time, and 66659.52 just 0.1% of the time." - . " A Chi-squared statistic larger than this indicates the sample values " - . "were less uniformly distributed than would normally be expected from a " - . "random selection.

" - . "A sustained rate of results outside of these bounds for the short term " - . "test would indicate a systemic failure. Since the long term test is " - . "continually accumulating upon the same set of data, it may be expected " - . "to take fairly long duration excursions out to the extreme limits of " - . "probability before eventually returning to a more expected range.\n"; - - # Roughly 1 in 100 million chance of passing the warning thresholds - # Roughly buckley's of passing the critical thresholds in normal operation - print "${f}_ent16_chisq_short.label Short term\n"; - print "${f}_ent16_chisq_short.line " - . "64940.644:cccccc:Random will exceed 64940 more than 95% of the time\n"; - print "${f}_ent16_chisq_short.warning 321:67459\n"; - print "${f}_ent16_chisq_short.critical 35:70000\n"; - print "${f}_ent16_chisq_short.info Short term Chi^2 distribution\n"; - - print "${f}_ent16_chisq_long.label Long term\n"; - print "${f}_ent16_chisq_long.line " - . "66131.632:cccccc:Random will exceed 66131 less than 5% of the time\n"; - print "${f}_ent16_chisq_long.warning 63823:67265\n"; - print "${f}_ent16_chisq_long.critical 35:70000\n"; - print "${f}_ent16_chisq_long.info Long term Chi^2 distribution\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.chisq_$f\n"; - print "graph_title BitBabbler $_ Chi^2 distribution (8-bit)\n"; - print "graph_args --alt-autoscale" - . " 'HRULE:330.523#ffaaaa:Random will exceed 330.523 less than 0.1% of the time'" - . " 'COMMENT: \\j'" - . " 'HRULE:190.869#ffaaaa:Random will exceed 190.869 more than 99.9% of the time'" - . " 'COMMENT: \\j'" - . "\n"; - print "graph_vlabel Chi^2\n"; - print "graph_scale no\n"; - print "graph_printf %8.2lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the results of Pearson's Chi-squared test " - . "for short and long sequences of 8-bit samples. The short term result is " - . "a test of the 500,000 most recently generated samples. The long term " - . "result is computed over all samples generated since the process being " - . "queried began.

" - . "A statistically random sequence would be expected to exceed 190.869 99.9% " - . "of the time, 205.421 99% of the time, and 219.025 95% of the time." - . " A Chi-squared statistic smaller than this indicates the sample values " - . "were more uniformly distributed than would normally be expected from a " - . "random selection.

" - . "At the opposite end of expectation, it is likely to exceed 293.248 only " - . "5% of the time, 310.457 1% of the time, and 330.523 just 0.1% of the time." - . " A Chi-squared statistic larger than this indicates the sample values " - . "were less uniformly distributed than would normally be expected from a " - . "random selection.

" - . "A sustained rate of results outside of these bounds for the short term " - . "test would indicate a systemic failure. Since the long term test is " - . "continually accumulating upon the same set of data, it may be expected " - . "to take fairly long duration excursions out to the extreme limits of " - . "probability before eventually returning to a more expected range.\n"; - - # Roughly 1 in 100 million chance of passing the warning thresholds - # Roughly buckley's of passing the critical thresholds in normal operation - print "${f}_ent_chisq_short.label Short term\n"; - print "${f}_ent_chisq_short.line " - . "219.025:cccccc:Random will exceed 219.025 more than 95% of the time\n"; - print "${f}_ent_chisq_short.warning 147:400\n"; - print "${f}_ent_chisq_short.critical 32:500\n"; - print "${f}_ent_chisq_short.info Short term Chi^2 distribution\n"; - - print "${f}_ent_chisq_long.label Long term\n"; - print "${f}_ent_chisq_long.line " - . "293.248:cccccc:Random will exceed 293.248 less than 5% of the time\n"; - print "${f}_ent_chisq_long.warning 161:377\n"; - print "${f}_ent_chisq_long.critical 32:500\n"; - print "${f}_ent_chisq_long.info Long term Chi^2 distribution\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.entropy16_$f\n"; - print "graph_title BitBabbler $_ estimated entropy (16-bit)\n"; - print "graph_args --alt-autoscale --alt-y-grid\n"; - print "graph_vlabel Entropy (per 16 bits)\n"; - print "graph_scale no\n"; - print "graph_printf %9.6lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the calculated Shannon and min entropy " - . "for a short term sequence of the most recent 100 million samples, and " - . "over the long term of all samples generated since the process being " - . "queried began. The Shannon entropy is based on the number of times " - . "that each possible sequence of 16 bits occurred. The min-entropy is " - . "a more conservative estimate that is based only on the number of " - . "times that the most frequent sample value was seen.

" - . "Note that when this analysis is performed on a 32-bit machine, the " - . "long term sample count will 'wrap around' well before the results " - . "can converge on their maximum expected value, and that when this does " - . "occur a small 'sawtooth' dip is expected to be seen in the results.\n"; - - print "${f}_ent16_entropy_short.label Shannon entropy short term\n"; - print "${f}_ent16_entropy_short.warning 15.9995:\n"; - print "${f}_ent16_entropy_short.critical 15.8:\n"; - print "${f}_ent16_entropy_short.info Short term Shannon entropy estimate\n"; - - print "${f}_ent16_entropy_long.label Shannon entropy long term\n"; - if (defined $warn{$f}{'Ent16'}) { - print "${f}_ent16_entropy_long.warning 15.9999:\n"; - print "${f}_ent16_entropy_long.critical 15.99:\n"; - } - print "${f}_ent16_entropy_long.info Long term Shannon entropy estimate\n"; - - print "${f}_ent16_minentropy_short.label Min-entropy short term\n"; - print "${f}_ent16_minentropy_short.warning 15.708:\n"; - print "${f}_ent16_minentropy_short.critical 15.7:\n"; - print "${f}_ent16_minentropy_short.info Short term min-entropy estimate\n"; - - print "${f}_ent16_minentropy_long.label Min-entropy long term\n"; - if (defined $warn{$f}{'Ent16'}) { - print "${f}_ent16_minentropy_long.warning 15.893:\n"; - print "${f}_ent16_minentropy_long.critical 15.8:\n"; - } - print "${f}_ent16_minentropy_long.info Long term min-entropy estimate\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.entropy_$f\n"; - print "graph_title BitBabbler $_ estimated entropy (8-bit)\n"; - print "graph_args --alt-autoscale --alt-y-grid\n"; - print "graph_vlabel Entropy (per 8 bits)\n"; - print "graph_scale no\n"; - print "graph_printf %9.6lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the calculated Shannon and min entropy " - . "for a short term sequence of the most recent 500,000 samples, and " - . "over the long term of all samples generated since the process being " - . "queried began. The Shannon entropy is based on the number of times " - . "that each possible sequence of 8 bits occurred. The min-entropy is " - . "a more conservative estimate that is based only on the number of " - . "times that the most frequent sample value was seen.

" - . "Note that when this analysis is performed on a 32-bit machine, the " - . "long term sample count will 'wrap around' well before the min entropy " - . "results can converge on their maximum expected value, and that when " - . "this does occur a tiny 'sawtooth' dip is expected to be seen in the " - . "results.\n"; - - print "${f}_ent_entropy_short.label Shannon entropy short term\n"; - print "${f}_ent_entropy_short.warning 7.999:\n"; - print "${f}_ent_entropy_short.critical 7.8:\n"; - print "${f}_ent_entropy_short.info Short term Shannon entropy estimate\n"; - - print "${f}_ent_entropy_long.label Shannon entropy long term\n"; - if (defined $warn{$f}{'Ent8'}) { - print "${f}_ent_entropy_long.warning 7.999999:\n"; - print "${f}_ent_entropy_long.critical 7.999:\n"; - } - print "${f}_ent_entropy_long.info Long term Shannon entropy estimate\n"; - - print "${f}_ent_minentropy_short.label Min-entropy short term\n"; - print "${f}_ent_minentropy_short.warning 7.73:\n"; - print "${f}_ent_minentropy_short.critical 7.7:\n"; - print "${f}_ent_minentropy_short.info Short term min-entropy estimate\n"; - - print "${f}_ent_minentropy_long.label Min-entropy long term\n"; - if (defined $warn{$f}{'Ent8'}) { - print "${f}_ent_minentropy_long.warning 7.99:\n"; - print "${f}_ent_minentropy_long.critical 7.9:\n"; - } - print "${f}_ent_minentropy_long.info Long term min-entropy estimate\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.mean16_$f\n"; - print "graph_title BitBabbler $_ mean value (16-bit)\n"; - if (defined $warn{$f}{'Ent16'}) { - print "graph_args --alt-autoscale --alt-y-grid\n"; - } else { - print "graph_args --alt-autoscale --alt-y-grid" - . " HRULE:32765.63#bbbbff" - . " HRULE:32769.37#bbbbff" - . "\n"; - } - print "graph_vlabel Mean of all samples\n"; - print "graph_scale no\n"; - print "graph_printf %10.6lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows a simple arithmetic mean of 16-bit samples " - . "over short and long term sequences. The short term result is a test " - . "of the 100 million most recently generated samples. The long term result " - . "is calculated over all samples generated since the process being queried " - . "began. An unbiased sequence would be expected to converge on 32767.5 over " - . "the long term, but the 16-bit mean can require a large number of samples " - . "before it does.\n"; - - print "${f}_ent16_mean_short.label Short term\n"; - print "${f}_ent16_mean_short.line 32767.5:bbbbbb\n"; - print "${f}_ent16_mean_short.warning 32759.81:32775.19\n"; - print "${f}_ent16_mean_short.critical 32757.5:32777.5\n"; - print "${f}_ent16_mean_short.info Short term mean\n"; - - print "${f}_ent16_mean_long.label Long term\n"; - if (defined $warn{$f}{'Ent16'}) { - print "${f}_ent16_mean_long.warning 32765.63:32769.37\n"; - print "${f}_ent16_mean_long.critical 32762.5:32772.5\n"; - } - print "${f}_ent16_mean_long.info Long term mean\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.mean_$f\n"; - print "graph_title BitBabbler $_ mean value (8-bit)\n"; - if (defined $warn{$f}{'Ent8'}) { - print "graph_args --alt-autoscale --alt-y-grid\n"; - } else { - print "graph_args --alt-autoscale --alt-y-grid" - . " HRULE:127.481#bbbbff" - . " HRULE:127.519#bbbbff" - . 
"\n"; - } - print "graph_vlabel Mean of all samples\n"; - print "graph_scale no\n"; - print "graph_printf %10.6lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows a simple arithmetic mean of 8-bit samples " - . "over short and long term sequences. The short term result is a test " - . "of the 500,000 most recently generated samples. The long term result " - . "is calculated over all samples generated since the process being queried " - . "began. An unbiased sequence would be expected to converge on 127.5 over " - . "the long term.\n"; - - print "${f}_ent_mean_short.label Short term\n"; - print "${f}_ent_mean_short.warning 126.92:128.08\n"; - print "${f}_ent_mean_short.critical 126.5:128.5\n"; - print "${f}_ent_mean_short.info Short term mean\n"; - - print "${f}_ent_mean_long.label Long term\n"; - if (defined $warn{$f}{'Ent8'}) { - print "${f}_ent_mean_long.warning 127.481:127.519\n"; - print "${f}_ent_mean_long.critical 127.0:128.0\n"; - } - print "${f}_ent_mean_long.info Long term mean\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.pi_error_$f\n"; - print "graph_title BitBabbler $_ Monte Carlo test (24-bit)\n"; - if (defined $warn{$f}{'Ent8'}) { - print "graph_args --base 1000\n"; # Don't inherit parent args - } else { - print "graph_args --base 1000 HRULE:-0.03#bbbbff HRULE:0.03#bbbbff\n"; - } - print "graph_vlabel % error calculating Pi\n"; - print "graph_scale no\n"; - print "graph_printf %6.4lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the error in computing the value of " - . "Pi using the 'Monte Carlo Method'. Consecutive sequences of " - . "24 bits are taken as X and Y coordinates inside a square. " - . "Since a circle inscribed in that square occupies Pi/4 of its " - . "area, then a uniformly distributed set of random points should " - . "fall inside or outside the radius of the circle with a ratio " - . "that when multiplied by 4 gives an approximation for Pi. The " - . "short term result is a test of the most recent 500,000 samples. " - . "The long term result is computed over all samples generated since " - . "the process being queried began. The results are graphed as the " - . "percentage of error relative to the real value of Pi. This test " - . "is relatively slow to converge on an accurate estimation, but a " - . "sustained or persistently diverging inaccuracy in the estimation " - . "would indicate a systemic error in the uniformity of the sample " - . 
"values.\n"; - - print "${f}_ent_pi_error_short.label Short term\n"; - print "${f}_ent_pi_error_short.warning -0.97:0.97\n"; - print "${f}_ent_pi_error_short.critical -2.0:2.0\n"; - print "${f}_ent_pi_error_short.info Short term error percentage\n"; - - print "${f}_ent_pi_error_long.label Long term\n"; - if (defined $warn{$f}{'Ent8'}) { - print "${f}_ent_pi_error_long.warning -0.03:0.03\n"; - print "${f}_ent_pi_error_long.critical -1.0:1.0\n"; - } - print "${f}_ent_pi_error_long.info Long term error percentage\n"; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.autocorr16_$f\n"; - print "graph_title BitBabbler $_ serial correlation (16-bit)\n"; - if (defined $warn{$f}{'Ent16'}) { - print "graph_args --base 1000\n"; # Don't inherit parent args - } else { - print "graph_args --base 1000 HRULE:-0.00008#bbbbff HRULE:0.00008#bbbbff\n"; - } - print "graph_vlabel Serial correlation coefficient\n"; - print "graph_scale yes\n"; - print "graph_printf %7.3lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the autocorrelation coefficient for " - . "a lag of 1 over the sequence of samples. This gives a measure " - . "of the extent to which each sample is related to the previous one. " - . "A perfectly predictable stream will converge on a result of 1.0, " - . "and a perfectly unpredictable one will converge on a result of 0." - . "The short term result is a test of the 100 million most recently " - . "generated samples. The long term result is computed over all " - . "samples generated since the process being queried began. " - . "A sustained divergence away from 0 or values close to +/- 1 " - . "indicate a problem that ought to be investigated.\n"; - - print "${f}_ent16_autocorr_short.label Short term\n"; - print "${f}_ent16_autocorr_short.warning -0.00044:0.00044\n"; - print "${f}_ent16_autocorr_short.critical -0.005:0.005\n"; - print "${f}_ent16_autocorr_short.info Short term serial correlation\n"; - - print "${f}_ent16_autocorr_long.label Long term\n"; - if (defined $warn{$f}{'Ent16'}) { - print "${f}_ent16_autocorr_long.warning -0.00011:0.00011\n"; - print "${f}_ent16_autocorr_long.critical -0.001:0.001\n"; - } - print "${f}_ent16_autocorr_long.info Long term serial correlation\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_ent.autocorr_$f\n"; - print "graph_title BitBabbler $_ serial correlation (8-bit)\n"; - if (defined $warn{$f}{'Ent8'}) { - print "graph_args --base 1000\n"; # Don't inherit parent args - } else { - print "graph_args --base 1000 HRULE:-0.0002#bbbbff HRULE:0.0002#bbbbff\n"; - } - print "graph_vlabel Serial correlation coefficient\n"; - print "graph_scale yes\n"; - print "graph_printf %7.3lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the autocorrelation coefficient for " - . "a lag of 1 over the sequence of samples. This gives a measure " - . "of the extent to which each sample is related to the previous one. " - . "A perfectly predictable stream will converge on a result of 1.0, " - . "and a perfectly unpredictable one will converge on a result of 0." - . "The short term result is a test of the 500,000 most recently " - . "generated samples. The long term result is computed over all " - . "samples generated since the process being queried began. " - . "A sustained divergence away from 0 or values close to +/- 1 " - . 
"indicate a problem that ought to be investigated.\n"; - - print "${f}_ent_autocorr_short.label Short term\n"; - print "${f}_ent_autocorr_short.warning -0.0078:0.0078\n"; - print "${f}_ent_autocorr_short.critical -0.009:0.009\n"; - print "${f}_ent_autocorr_short.info Short term serial correlation\n"; - - print "${f}_ent_autocorr_long.label Long term\n"; - if (defined $warn{$f}{'Ent8'}) { - print "${f}_ent_autocorr_long.warning -0.00025:0.00025\n"; - print "${f}_ent_autocorr_long.critical -0.005:0.005\n"; - } - print "${f}_ent_autocorr_long.info Long term serial correlation\n"; - } - -} #}}} - -sub report_ent_values(@) -{ #{{{ - - print "multigraph bb_ent\n"; - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; - - if (defined $ent) { - print "${f}_ent_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; - print "${f}_ent_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; - } else { - print "${f}_ent_entropy_short.value U\n"; - print "${f}_ent_entropy_long.value U\n"; - } - } - - - for my $n ('', '16') { - - my $e = $n ? 'Ent16' : 'Ent8'; - - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; - - print "multigraph bb_ent.chisq${n}_$f\n"; - - if (defined $ent) { - print "${f}_ent${n}_chisq_short.value $ent->{'Short'}{'Current'}{'Chisq'}\n"; - print "${f}_ent${n}_chisq_long.value $ent->{'Long'}{'Current'}{'Chisq'}\n"; - } else { - print "${f}_ent${n}_chisq_short.value U\n"; - print "${f}_ent${n}_chisq_long.value U\n"; - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; - - print "multigraph bb_ent.entropy${n}_$f\n"; - - if (defined $ent) { - print "${f}_ent${n}_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; - print "${f}_ent${n}_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; - print "${f}_ent${n}_minentropy_short.value $ent->{'Short'}{'Current'}{'MinEntropy'}\n"; - print "${f}_ent${n}_minentropy_long.value $ent->{'Long'}{'Current'}{'MinEntropy'}\n"; - } else { - print "${f}_ent${n}_entropy_short.value U\n"; - print "${f}_ent${n}_entropy_long.value U\n"; - print "${f}_ent${n}_minentropy_short.value U\n"; - print "${f}_ent${n}_minentropy_long.value U\n"; - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; - - print "multigraph bb_ent.mean${n}_$f\n"; - - if (defined $ent) { - print "${f}_ent${n}_mean_short.value $ent->{'Short'}{'Current'}{'Mean'}\n"; - print "${f}_ent${n}_mean_long.value $ent->{'Long'}{'Current'}{'Mean'}\n"; - } else { - print "${f}_ent${n}_mean_short.value U\n"; - print "${f}_ent${n}_mean_long.value U\n"; - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; - - print "multigraph bb_ent.autocorr${n}_$f\n"; - - if (defined $ent) { - print "${f}_ent${n}_autocorr_short.value $ent->{'Short'}{'Current'}{'Autocorr'}\n"; - print "${f}_ent${n}_autocorr_long.value $ent->{'Long'}{'Current'}{'Autocorr'}\n"; - } else { - print "${f}_ent${n}_autocorr_short.value U\n"; - print "${f}_ent${n}_autocorr_long.value U\n"; - } - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; - - print "multigraph bb_ent.pi_error_$f\n"; - - if (defined $ent) { - print "${f}_ent_pi_error_short.value $ent->{'Short'}{'Current'}{'Pi-error'}\n"; - print "${f}_ent_pi_error_long.value $ent->{'Long'}{'Current'}{'Pi-error'}\n"; - } else { - 
print "${f}_ent_pi_error_short.value U\n"; - print "${f}_ent_pi_error_long.value U\n"; - } - } - -} #}}} - - -sub report_fips_pass_config(@) -{ #{{{ - - print "multigraph bb_fips_pass\n"; - print "graph_title BitBabbler FIPS 140-2 pass run length\n"; - print "graph_vlabel Consecutive tests without failure\n"; - print "graph_scale no\n"; - print "graph_printf %6.0lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the run length between FIPS 140-2 test " - . "failures. A correctly working system should expect to see failure of " - . "the FIPS 140-2 tests about once in every 1250 blocks tested on average." - . " Occasional runs of much longer than that can be reasonably expected, " - . "with a run of 17500 or longer expected about once in 1.2 million tests " - . "(about 3.5TB of samples). A sustained lack of failures would indicate " - . "a problem that ought to be investigated.\n"; - - for (@_) { - my $f = clean_fieldname($_); - - print "${f}_pass_avg_short.label $_\n"; - print "${f}_pass_avg_short.info Short term average run of tests without failure\n"; - } - - - print "multigraph bb_fips_pass.longest\n"; - print "graph_title BitBabbler FIPS 140-2 longest pass run\n"; - print "graph_scale no\n"; - print "graph_printf %6.0lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the longest run of consecutive blocks " - . "without a FIPS 140-2 test failure, since the process being queried " - . "began. A run of 17500 or longer is expected about once in 1.2 million " - . "blocks tested (about 3.5TB of samples), but runs longer than that are " - . "not impossible, just increasingly rare. The average rate graph is a " - . "better measure of correct operation than this one, but consistently " - . "unusual results for the peak run length would be something that ought " - . "to be investigated more closely.\n"; - - for (@_) { - my $f = clean_fieldname($_); - - print "${f}_pass_max.label $_\n"; - print "${f}_pass_max.info Longest run of tests without failure\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_fips_pass.qa_$f\n"; - print "graph_title BitBabbler $_ FIPS 140-2 pass run length\n"; - print "graph_scale no\n"; - print "graph_printf %6.0lf\n"; - print "graph_category system\n"; - - print "${f}_pass_avg_short.label Short term average\n"; - print "${f}_pass_avg_short.warning 20000\n"; - print "${f}_pass_avg_short.info Average run of tests without failure\n"; - - print "${f}_pass_avg_long.label Long term average\n"; - print "${f}_pass_avg_long.warning 20000\n"; - print "${f}_pass_avg_long.info Average run of tests without failure\n"; - } - -} #}}} - -sub report_fips_pass_values(@) -{ #{{{ - - print "multigraph bb_fips_pass\n"; - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - print "${f}_pass_avg_short.value " - . ($fips ? $fips->{'Result'}{'PassRuns'}{'Short'} : "U") . "\n"; - } - - print "multigraph bb_fips_pass.longest\n"; - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - print "${f}_pass_max.value " - . ($fips ? $fips->{'Result'}{'PassRuns'}{'Peak'} : "U") . 
"\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - print "multigraph bb_fips_pass.qa_$f\n"; - - if (defined $fips) { - print "${f}_pass_avg_short.value $fips->{'Result'}{'PassRuns'}{'Short'}\n"; - print "${f}_pass_avg_long.value $fips->{'Result'}{'PassRuns'}{'Long'}\n"; - } else { - print "${f}_pass_avg_short.value U\n"; - print "${f}_pass_avg_long.value U\n"; - } - } - -} #}}} - - -sub report_fips_fail_config(@) -{ #{{{ - - print "multigraph bb_fips_fail\n"; - print "graph_title BitBabbler FIPS 140-2 testing\n"; - print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; - print "graph_scale no\n"; - print "graph_printf %6.4lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the long and short term failure rates " - . "for the FIPS 140-2 tests on each source. The short term average " - . "tracks a window of the last 1000 tests. A correctly working system " - . "should expect to converge on just under 0.8 failures per thousand as " - . "the long term trend, with with the short term average varying from 0 " - . "with occasional peaks over 5 (as the rare, but not quite infinitely " - . "improbable, rough upper bound). A sustained short term rate greater " - . "than that would indicate a systemic failure.\n"; - - my $first = 1; - - for (@_) { - my $f = clean_fieldname($_); - - print "${f}_l.label $_\n"; - print "${f}_l.graph no\n"; - print "${f}_l.line -0.829:bbbbbb\n" if $first; - print "${f}_l.info Long term average rate of failures\n"; - - print "${f}_s.label $_\n"; - print "${f}_s.negative ${f}_l\n"; - print "${f}_s.line 0.829:bbbbbb:Expected average rate\n" if $first; - print "${f}_s.info Short term rolling average rate of failures\n"; - - $first = 0; - } - - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_fips_fail.qa_$f\n"; - print "graph_title BitBabbler $_ average FIPS 140-2 failure rate\n"; - print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; - print "graph_scale no\n"; - print "graph_printf %6.4lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the long and short term failure rates " - . "for the FIPS 140-2 tests. The short term average tracks a window " - . "of the last 1000 tests. A correctly working system should expect " - . "to converge on around 0.8 failures per thousand (of any test) as " - . "the long term trend, with with the short term average varying from " - . "0 with occasional peaks over 5 (as the rare, but not infinitely " - . "improbable, rough upper bound). A sustained short term rate greater " - . "than that would indicate a systemic failure. The expected (long term) " - . 
"rates of failure for each individual test are indicated below.\n"; - - print "${f}_l.label Failure rate\n"; - print "${f}_l.graph no\n"; - print "${f}_l.line -0.829:bbbbbb\n"; - - print "${f}_s.label Failure rate\n"; - print "${f}_s.negative ${f}_l\n"; - print "${f}_s.line 0.829:bbbbbb:Expected average rate\n"; - print "${f}_s.info Failure of any test\n"; - print "${f}_s.warning 5.5\n"; - print "${f}_s.critical 10.0\n"; - - - print "${f}_monl.label Monobit\n"; - print "${f}_monl.graph no\n"; - - print "${f}_mons.label Monobit\n"; - print "${f}_mons.negative ${f}_monl\n"; - print "${f}_mons.info Expect 0.104 per 1000\n"; - - - print "${f}_pokl.label Poker\n"; - print "${f}_pokl.graph no\n"; - - print "${f}_poks.label Poker\n"; - print "${f}_poks.negative ${f}_pokl\n"; - print "${f}_poks.info Expect 0.099 per 1000\n"; - - - print "${f}_runl.label Runs\n"; - print "${f}_runl.graph no\n"; - - print "${f}_runs.label Runs\n"; - print "${f}_runs.negative ${f}_runl\n"; - print "${f}_runs.info Expect 0.328 per 1000\n"; - - - print "${f}_lrl.label Long run\n"; - print "${f}_lrl.graph no\n"; - - print "${f}_lrs.label Long run\n"; - print "${f}_lrs.negative ${f}_lrl\n"; - print "${f}_lrs.info Expect 0.298 per 1000\n"; - - - print "${f}_repl.label Repetition\n"; - print "${f}_repl.graph no\n"; - - print "${f}_reps.label Repetition\n"; - print "${f}_reps.negative ${f}_repl\n"; - print "${f}_reps.info Expect to be very rare\n"; - } - - for (@_) { - my $f = clean_fieldname($_); - - print "multigraph bb_fips_fail.peak_$f\n"; - print "graph_title BitBabbler $_ peak FIPS 140-2 failure rate\n"; - print "graph_vlabel Max failure rate (per 1000 tests)\n"; - print "graph_scale no\n"; - print "graph_printf %6.4lf\n"; - print "graph_category system\n"; - print "graph_info This graph shows the worst case failure rates for the " - . "FIPS 140-2 tests since the process we are querying began. These " - . "are the peak values seen as the short term average over a window " - . "of the last 1000 tests.\n"; - - print "${f}_p.label Failure rate\n"; - print "${f}_p.info Failure of any test\n"; - - print "${f}_monp.label Monobit\n"; - print "${f}_pokp.label Poker\n"; - print "${f}_runp.label Runs\n"; - print "${f}_lrp.label Long run\n"; - print "${f}_repp.label Repetition\n"; - } - -} #}}} - -sub report_fips_fail_values(@) -{ #{{{ - - print "multigraph bb_fips_fail\n"; - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - if (defined $fips) { - print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; - } else { - print "${f}_l.value U\n"; - print "${f}_s.value U\n"; - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - print "multigraph bb_fips_fail.qa_$f\n"; - - if (defined $fips) { - print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; - - print "${f}_monl.value " . $fips->{'Monobit'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_mons.value " . $fips->{'Monobit'}{'FailRate'}{'Short'} * 1000 . "\n"; - - print "${f}_pokl.value " . $fips->{'Poker'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_poks.value " . $fips->{'Poker'}{'FailRate'}{'Short'} * 1000 . "\n"; - - print "${f}_runl.value " . $fips->{'Runs'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_runs.value " . 
$fips->{'Runs'}{'FailRate'}{'Short'} * 1000 . "\n"; - - print "${f}_lrl.value " . $fips->{'Long run'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_lrs.value " . $fips->{'Long run'}{'FailRate'}{'Short'} * 1000 . "\n"; - - print "${f}_repl.value " . $fips->{'Repetition'}{'FailRate'}{'Long'} * 1000 . "\n"; - print "${f}_reps.value " . $fips->{'Repetition'}{'FailRate'}{'Short'} * 1000 . "\n"; - } else { - print "${f}_l.value U\n"; - print "${f}_s.value U\n"; - - print "${f}_monl.value U\n"; - print "${f}_mons.value U\n"; - - print "${f}_pokl.value U\n"; - print "${f}_poks.value U\n"; - - print "${f}_runl.value U\n"; - print "${f}_runs.value U\n"; - - print "${f}_lrl.value U\n"; - print "${f}_lrs.value U\n"; - - print "${f}_repl.value U\n"; - print "${f}_reps.value U\n"; - } - } - - for (@_) { - my $f = clean_fieldname($_); - my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; - - print "multigraph bb_fips_fail.peak_$f\n"; - - if (defined $fips) { - print "${f}_p.value " . $fips->{'Result'}{'FailRate'}{'Peak'} * 1000 . "\n"; - print "${f}_monp.value " . $fips->{'Monobit'}{'FailRate'}{'Peak'} * 1000 . "\n"; - print "${f}_pokp.value " . $fips->{'Poker'}{'FailRate'}{'Peak'} * 1000 . "\n"; - print "${f}_runp.value " . $fips->{'Runs'}{'FailRate'}{'Peak'} * 1000 . "\n"; - print "${f}_lrp.value " . $fips->{'Long run'}{'FailRate'}{'Peak'} * 1000 . "\n"; - print "${f}_repp.value " . $fips->{'Repetition'}{'FailRate'}{'Peak'} * 1000 . "\n"; - } else { - print "${f}_p.value U\n"; - print "${f}_monp.value U\n"; - print "${f}_pokp.value U\n"; - print "${f}_runp.value U\n"; - print "${f}_lrp.value U\n"; - print "${f}_repp.value U\n"; - } - } - -} #}}} - - -sub report_config() -{ #{{{ - - my $persist = $ENV{'persist_devices'} || "no"; - my @sources; - - @sources = restore_state() if $persist eq "yes"; - - eval { - get_ids(); - @sources = unique_list(@sources, @{$json->[2]}); - }; - - if (exists $ENV{'always_ignore'}) { - my %ignore; - my @remains; - - @ignore{split(' ',$ENV{'always_ignore'})} = (); - - for (@sources) { - push(@remains, $_) unless exists $ignore{$_}; - } - - @sources = @remains; - } - - save_state(@sources) if $persist eq "yes"; - - @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; - - report_bitrate_config(@sources); - report_ent_config(@sources); - report_fips_pass_config(@sources); - report_fips_fail_config(@sources); - -} #}}} - -sub report_values() -{ #{{{ - - get_stats(); - - my @sources; - - if (($ENV{'persist_devices'} || "") eq "yes") { - @sources = restore_state(); - } else { - @sources = keys %{$json->[2]}; - - if (exists $ENV{'always_ignore'}) { - my %ignore; - my @remains; - - @ignore{split(' ',$ENV{'always_ignore'})} = (); - - for (@sources) { - push(@remains, $_) unless exists $ignore{$_}; - } - - @sources = @remains; - } - } - - @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; - - report_bitrate_values(@sources); - report_ent_values(@sources); - report_fips_pass_values(@sources); - report_fips_fail_values(@sources); - -} #}}} - - -if (!defined $ARGV[0]) { - report_values(); -} -elsif ($ARGV[0] eq "config") { - report_config(); -} -elsif ($ARGV[0] eq "autoconf") { - # If the package providing this is installed, we presume you're going to - # want it enabled if munin-node is also installed. We could make this a - # bit more nuanced if this script is ever installed on a lot of systems - # where that isn't likely to be true. 
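
# For reference, as a cross-check of the figures used in report_fips_fail_config
# above: the expected long term failure rates quoted for the Monobit, Poker,
# Runs and Long run tests sum to 0.104 + 0.099 + 0.328 + 0.298 = 0.829 failures
# per 1000 blocks, which is the value drawn as the "Expected average rate"
# reference line, and corresponds to roughly one failing block in every 1200
# tested - consistent with the "about once in every 1250 blocks" figure quoted
# for the pass run length graphs.
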
- print "yes\n"; -} - -# vi:sts=4:sw=4:et:foldmethod=marker diff -Nru bit-babbler-0.7/munin/bit_babbler.in bit-babbler-0.8/munin/bit_babbler.in --- bit-babbler-0.7/munin/bit_babbler.in 1970-01-01 00:00:00.000000000 +0000 +++ bit-babbler-0.8/munin/bit_babbler.in 2018-02-07 23:56:52.000000000 +0000 @@ -0,0 +1,1140 @@ +#!/usr/bin/perl -w + +# This file is distributed as part of the bit-babbler package. +# Copyright 2014 - 2018, Ron + +# Munin magic markers +#%# family=auto +#%# capabilities=autoconf + +use strict; + +use IO::Socket; +use JSON::XS; +use Munin::Plugin; + +my $control_socket = $ENV{'control_socket'} || "@SEEDD_CONTROL_SOCKET@"; +my $json; + + +sub cmd_request($) +{ #{{{ + + my $request = shift; + + my $sock = IO::Socket::UNIX->new( + Type => SOCK_STREAM, + Peer => $control_socket + ) or die "Could not create socket: $!\n"; + + my $max_chunk_size = 65536; + my $data; + my $msg; + my $flags; + + $sock->send('"' . $request . "\"\0") or die "Failed to send '$request' request: $!\n"; + do { + $sock->recv($data,$max_chunk_size,$flags) or die "Failed to read reply: $!\n"; + $msg .= $data; + } + while( $data !~ /\0/ ); + + $json = eval { JSON::XS->new->decode($msg) }; + die "JSON decode failed: $@: $msg\n" if $@; + + if ($json->[0] ne $request) { + die "Unrecognised reply: $json->[0]\n"; + } + +} #}}} + +sub get_ids() +{ + cmd_request("GetIDs"); +} + +sub get_stats() +{ + cmd_request("ReportStats"); +} + +sub unique_list(@) { + my %h; + map { $h{$_}++ ? () : $_ } @_; +} + + +sub report_bitrate_config(@) +{ #{{{ + + print "multigraph bb_bitrate\n"; + print "graph_title BitBabbler bytes output\n"; + print "graph_vlabel Bytes/second\n"; + print "graph_category system\n"; + + for (@_) { + my $f = clean_fieldname($_); + + print "${f}_qa_passed.label $_\n"; + print "${f}_qa_passed.type COUNTER\n"; + print "${f}_qa_passed.max 1000000\n"; + print "${f}_qa_passed.info Good entropy output\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_bitrate.output_$f\n"; + print "graph_title BitBabbler $_ bytes output\n"; + print "graph_vlabel Bytes/second\n"; + print "graph_category system\n"; + print "graph_info This graph shows the demand for entropy (and the rate at " + . "which it is able to be delivered). The discarded rate is entropy which " + . "was read from the device, but which was not used because either the " + . "QA checks are currently failing, or they are still confirming whether " + . "a failure was really a transient anomaly or not. It is not unusual to " + . "have some entropy discarded at first start up, since the QA checking " + . "puts the initial onus on the source to prove it is good, and will not " + . "pass entropy from it until it does. The passed rate includes entropy " + . "that is output even when none is being consumed, which is used to keep " + . "the pools constantly fresh.\n"; + + print "${f}_qa_passed.label Passed\n"; + print "${f}_qa_passed.type COUNTER\n"; + print "${f}_qa_passed.max 1000000\n"; + print "${f}_qa_passed.info Good entropy output\n"; + + print "${f}_qa_unpassed.label Discarded\n"; + print "${f}_qa_unpassed.type COUNTER\n"; + print "${f}_qa_unpassed.max 1000000\n"; + print "${f}_qa_unpassed.warning 1\n"; + print "${f}_qa_unpassed.info Discarded entropy\n"; + } + +} #}}} + +sub report_bitrate_values(@) +{ #{{{ + + print "multigraph bb_bitrate\n"; + for (@_) { + my $f = clean_fieldname($_); + my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; + + print "${f}_qa_passed.value " . ($qa ? $qa->{'BytesPassed'} : "U") . 
"\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; + + print "multigraph bb_bitrate.output_$f\n"; + + if (defined $qa) { + print "${f}_qa_passed.value $qa->{'BytesPassed'}\n"; + print "${f}_qa_unpassed.value " . ($qa->{'BytesAnalysed'} - $qa->{'BytesPassed'}) . "\n"; + } else { + print "${f}_qa_passed.value U\n"; + print "${f}_qa_unpassed.value U\n"; + } + } + +} #}}} + + +sub report_ent_config(@) +{ #{{{ + + # Check if the Ent test long statistics are expected to have converged on + # their threshold limits. We don't want to bark warnings here until then. + # + # This is a bit ugly, it means we fetch the stats used to output the values + # during both the config and the fetch phases. We could cache them during + # fetch, but that's backward and the first config run will potentially read + # stale data from the cache (triggering a burst of false warnings, which is + # exactly what this extra complication is here to solve ...). We could + # instead cache them here, and read that during fetch -- but it's not clear + # that's actually simpler or in any way more efficient than just querying + # for them twice ... We could also add a simpler request to the protocol, + # that only fetches the sample counts we need to inspect here, but that may + # be overengineering our way around this too. + # + # What we do here then, is check if Ent::Limits::long_minsamples has been + # reached yet for each device we are graphing, and flag each of them that + # have. We then use that to decide whether to include the warning limits + # in the config for each graph. This avoids the situation where devices + # that aren't having a lot of data read from them, and which may take many + # hours, or even days, to reach the long test convergence thresholds, will + # be reporting 'false' warnings each time the process is restarted until + # that finally happens. With this, the warnings will only trigger in the + # cases where the test is considered to have actually failed and the output + # from the device is being suppressed. Which is the only case we really + # want to alert the admin about. Crying wolf when the test is not actually + # valid yet will just lead someone to ignore a real failure - and relaxing + # the thresholds for warnings here to avoid that would mean a real failure + # might go unnoticed for a longer period than would be ideal, or be harder + # to pinpoint the real cause when some other metric alerts them to it. + # + # So it's better to be a little bit ugly here, than a lot ugly for users. + # This could be a lot easier if munin didn't split each request into two + # separate phases that both occur for every poll. 
+ my %warn; + + get_stats(); + + for (keys %{$json->[2]}) { + my $f = clean_fieldname($_); + my $ent8 = $json->[2]{$_}{'Ent8'}; + my $ent16 = $json->[2]{$_}{'Ent16'}; + + $warn{$f}{'Ent8'} = 1 + if (defined $ent8 && $ent8->{'Long'}{'Samples'} > 250000000); + + $warn{$f}{'Ent16'} = 1 + if (defined $ent16 && $ent16->{'Long'}{'Samples'} > 500000000); + } + + + + print "multigraph bb_ent\n"; + print "graph_title BitBabbler Ent tests\n"; + print "graph_args --alt-autoscale --alt-y-grid\n"; + print "graph_vlabel Shannon entropy (per 8 bits)\n"; + print "graph_scale no\n"; + print "graph_printf %9.6lf\n"; + print "graph_category system\n"; + + for (@_) { + my $f = clean_fieldname($_); + + print "${f}_ent_entropy_short.label $_ short term\n"; + print "${f}_ent_entropy_short.info Short term entropy estimate\n"; + + print "${f}_ent_entropy_long.label $_ long term\n"; + print "${f}_ent_entropy_long.info Long term entropy estimate\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.chisq16_$f\n"; + print "graph_title BitBabbler $_ Chi^2 distribution (16-bit)\n"; + print "graph_args --alt-autoscale" + . " 'HRULE:66659.52#ffaaaa:Random will exceed 66659.52 less than 0.1% of the time'" + . " 'COMMENT: \\j'" + . " 'HRULE:64421.97#ffaaaa:Random will exceed 64421.97 more than 99.9% of the time'" + . " 'COMMENT: \\j'" + . "\n"; + print "graph_vlabel Chi^2\n"; + print "graph_scale no\n"; + print "graph_printf %8.2lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the results of Pearson's Chi-squared test " + . "for short and long sequences of 16-bit samples. The short term result is " + . "a test of the 100 million most recently generated samples. The long term " + . "result is computed over all samples generated since the process being " + . "queried began.

" + . "A statistically random sequence would be expected to exceed 64421.97 99.9% " + . "of the time, 64695.73 99% of the time, and 64940.64 95% of the time." + . " A Chi-squared statistic smaller than this indicates the sample values " + . "were more uniformly distributed than would normally be expected from a " + . "random selection.

" + . "At the opposite end of expectation, it is likely to exceed 66131.63 only " + . "5% of the time, 66380.17 1% of the time, and 66659.52 just 0.1% of the time." + . " A Chi-squared statistic larger than this indicates the sample values " + . "were less uniformly distributed than would normally be expected from a " + . "random selection.

" + . "A sustained rate of results outside of these bounds for the short term " + . "test would indicate a systemic failure. Since the long term test is " + . "continually accumulating upon the same set of data, it may be expected " + . "to take fairly long duration excursions out to the extreme limits of " + . "probability before eventually returning to a more expected range.\n"; + + # Roughly 1 in 100 million chance of passing the warning thresholds + # Roughly buckley's of passing the critical thresholds in normal operation + print "${f}_ent16_chisq_short.label Short term\n"; + print "${f}_ent16_chisq_short.line " + . "64940.644:cccccc:Random will exceed 64940 more than 95% of the time\n"; + print "${f}_ent16_chisq_short.warning 321:67459\n"; + print "${f}_ent16_chisq_short.critical 35:70000\n"; + print "${f}_ent16_chisq_short.info Short term Chi^2 distribution\n"; + + print "${f}_ent16_chisq_long.label Long term\n"; + print "${f}_ent16_chisq_long.line " + . "66131.632:cccccc:Random will exceed 66131 less than 5% of the time\n"; + print "${f}_ent16_chisq_long.warning 63823:67265\n"; + print "${f}_ent16_chisq_long.critical 35:70000\n"; + print "${f}_ent16_chisq_long.info Long term Chi^2 distribution\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.chisq_$f\n"; + print "graph_title BitBabbler $_ Chi^2 distribution (8-bit)\n"; + print "graph_args --alt-autoscale" + . " 'HRULE:330.523#ffaaaa:Random will exceed 330.523 less than 0.1% of the time'" + . " 'COMMENT: \\j'" + . " 'HRULE:190.869#ffaaaa:Random will exceed 190.869 more than 99.9% of the time'" + . " 'COMMENT: \\j'" + . "\n"; + print "graph_vlabel Chi^2\n"; + print "graph_scale no\n"; + print "graph_printf %8.2lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the results of Pearson's Chi-squared test " + . "for short and long sequences of 8-bit samples. The short term result is " + . "a test of the 500,000 most recently generated samples. The long term " + . "result is computed over all samples generated since the process being " + . "queried began.

" + . "A statistically random sequence would be expected to exceed 190.869 99.9% " + . "of the time, 205.421 99% of the time, and 219.025 95% of the time." + . " A Chi-squared statistic smaller than this indicates the sample values " + . "were more uniformly distributed than would normally be expected from a " + . "random selection.

" + . "At the opposite end of expectation, it is likely to exceed 293.248 only " + . "5% of the time, 310.457 1% of the time, and 330.523 just 0.1% of the time." + . " A Chi-squared statistic larger than this indicates the sample values " + . "were less uniformly distributed than would normally be expected from a " + . "random selection.

" + . "A sustained rate of results outside of these bounds for the short term " + . "test would indicate a systemic failure. Since the long term test is " + . "continually accumulating upon the same set of data, it may be expected " + . "to take fairly long duration excursions out to the extreme limits of " + . "probability before eventually returning to a more expected range.\n"; + + # Roughly 1 in 100 million chance of passing the warning thresholds + # Roughly buckley's of passing the critical thresholds in normal operation + print "${f}_ent_chisq_short.label Short term\n"; + print "${f}_ent_chisq_short.line " + . "219.025:cccccc:Random will exceed 219.025 more than 95% of the time\n"; + print "${f}_ent_chisq_short.warning 147:400\n"; + print "${f}_ent_chisq_short.critical 32:500\n"; + print "${f}_ent_chisq_short.info Short term Chi^2 distribution\n"; + + print "${f}_ent_chisq_long.label Long term\n"; + print "${f}_ent_chisq_long.line " + . "293.248:cccccc:Random will exceed 293.248 less than 5% of the time\n"; + print "${f}_ent_chisq_long.warning 161:377\n"; + print "${f}_ent_chisq_long.critical 32:500\n"; + print "${f}_ent_chisq_long.info Long term Chi^2 distribution\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.entropy16_$f\n"; + print "graph_title BitBabbler $_ estimated entropy (16-bit)\n"; + print "graph_args --alt-autoscale --alt-y-grid\n"; + print "graph_vlabel Entropy (per 16 bits)\n"; + print "graph_scale no\n"; + print "graph_printf %9.6lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the calculated Shannon and min entropy " + . "for a short term sequence of the most recent 100 million samples, and " + . "over the long term of all samples generated since the process being " + . "queried began. The Shannon entropy is based on the number of times " + . "that each possible sequence of 16 bits occurred. The min-entropy is " + . "a more conservative estimate that is based only on the number of " + . "times that the most frequent sample value was seen.

" + . "Note that when this analysis is performed on a 32-bit machine, the " + . "long term sample count will 'wrap around' well before the results " + . "can converge on their maximum expected value, and that when this does " + . "occur a small 'sawtooth' dip is expected to be seen in the results.\n"; + + print "${f}_ent16_entropy_short.label Shannon entropy short term\n"; + print "${f}_ent16_entropy_short.warning 15.9995:\n"; + print "${f}_ent16_entropy_short.critical 15.8:\n"; + print "${f}_ent16_entropy_short.info Short term Shannon entropy estimate\n"; + + print "${f}_ent16_entropy_long.label Shannon entropy long term\n"; + if (defined $warn{$f}{'Ent16'}) { + print "${f}_ent16_entropy_long.warning 15.9999:\n"; + print "${f}_ent16_entropy_long.critical 15.99:\n"; + } + print "${f}_ent16_entropy_long.info Long term Shannon entropy estimate\n"; + + print "${f}_ent16_minentropy_short.label Min-entropy short term\n"; + print "${f}_ent16_minentropy_short.warning 15.708:\n"; + print "${f}_ent16_minentropy_short.critical 15.7:\n"; + print "${f}_ent16_minentropy_short.info Short term min-entropy estimate\n"; + + print "${f}_ent16_minentropy_long.label Min-entropy long term\n"; + if (defined $warn{$f}{'Ent16'}) { + print "${f}_ent16_minentropy_long.warning 15.893:\n"; + print "${f}_ent16_minentropy_long.critical 15.8:\n"; + } + print "${f}_ent16_minentropy_long.info Long term min-entropy estimate\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.entropy_$f\n"; + print "graph_title BitBabbler $_ estimated entropy (8-bit)\n"; + print "graph_args --alt-autoscale --alt-y-grid\n"; + print "graph_vlabel Entropy (per 8 bits)\n"; + print "graph_scale no\n"; + print "graph_printf %9.6lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the calculated Shannon and min entropy " + . "for a short term sequence of the most recent 500,000 samples, and " + . "over the long term of all samples generated since the process being " + . "queried began. The Shannon entropy is based on the number of times " + . "that each possible sequence of 8 bits occurred. The min-entropy is " + . "a more conservative estimate that is based only on the number of " + . "times that the most frequent sample value was seen.

" + . "Note that when this analysis is performed on a 32-bit machine, the " + . "long term sample count will 'wrap around' well before the min entropy " + . "results can converge on their maximum expected value, and that when " + . "this does occur a tiny 'sawtooth' dip is expected to be seen in the " + . "results.\n"; + + print "${f}_ent_entropy_short.label Shannon entropy short term\n"; + print "${f}_ent_entropy_short.warning 7.999:\n"; + print "${f}_ent_entropy_short.critical 7.8:\n"; + print "${f}_ent_entropy_short.info Short term Shannon entropy estimate\n"; + + print "${f}_ent_entropy_long.label Shannon entropy long term\n"; + if (defined $warn{$f}{'Ent8'}) { + print "${f}_ent_entropy_long.warning 7.999999:\n"; + print "${f}_ent_entropy_long.critical 7.999:\n"; + } + print "${f}_ent_entropy_long.info Long term Shannon entropy estimate\n"; + + print "${f}_ent_minentropy_short.label Min-entropy short term\n"; + print "${f}_ent_minentropy_short.warning 7.73:\n"; + print "${f}_ent_minentropy_short.critical 7.7:\n"; + print "${f}_ent_minentropy_short.info Short term min-entropy estimate\n"; + + print "${f}_ent_minentropy_long.label Min-entropy long term\n"; + if (defined $warn{$f}{'Ent8'}) { + print "${f}_ent_minentropy_long.warning 7.99:\n"; + print "${f}_ent_minentropy_long.critical 7.9:\n"; + } + print "${f}_ent_minentropy_long.info Long term min-entropy estimate\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.mean16_$f\n"; + print "graph_title BitBabbler $_ mean value (16-bit)\n"; + if (defined $warn{$f}{'Ent16'}) { + print "graph_args --alt-autoscale --alt-y-grid\n"; + } else { + print "graph_args --alt-autoscale --alt-y-grid" + . " HRULE:32765.63#bbbbff" + . " HRULE:32769.37#bbbbff" + . "\n"; + } + print "graph_vlabel Mean of all samples\n"; + print "graph_scale no\n"; + print "graph_printf %10.6lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows a simple arithmetic mean of 16-bit samples " + . "over short and long term sequences. The short term result is a test " + . "of the 100 million most recently generated samples. The long term result " + . "is calculated over all samples generated since the process being queried " + . "began. An unbiased sequence would be expected to converge on 32767.5 over " + . "the long term, but the 16-bit mean can require a large number of samples " + . "before it does.\n"; + + print "${f}_ent16_mean_short.label Short term\n"; + print "${f}_ent16_mean_short.line 32767.5:bbbbbb\n"; + print "${f}_ent16_mean_short.warning 32759.81:32775.19\n"; + print "${f}_ent16_mean_short.critical 32757.5:32777.5\n"; + print "${f}_ent16_mean_short.info Short term mean\n"; + + print "${f}_ent16_mean_long.label Long term\n"; + if (defined $warn{$f}{'Ent16'}) { + print "${f}_ent16_mean_long.warning 32765.63:32769.37\n"; + print "${f}_ent16_mean_long.critical 32762.5:32772.5\n"; + } + print "${f}_ent16_mean_long.info Long term mean\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.mean_$f\n"; + print "graph_title BitBabbler $_ mean value (8-bit)\n"; + if (defined $warn{$f}{'Ent8'}) { + print "graph_args --alt-autoscale --alt-y-grid\n"; + } else { + print "graph_args --alt-autoscale --alt-y-grid" + . " HRULE:127.481#bbbbff" + . " HRULE:127.519#bbbbff" + . 
"\n"; + } + print "graph_vlabel Mean of all samples\n"; + print "graph_scale no\n"; + print "graph_printf %10.6lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows a simple arithmetic mean of 8-bit samples " + . "over short and long term sequences. The short term result is a test " + . "of the 500,000 most recently generated samples. The long term result " + . "is calculated over all samples generated since the process being queried " + . "began. An unbiased sequence would be expected to converge on 127.5 over " + . "the long term.\n"; + + print "${f}_ent_mean_short.label Short term\n"; + print "${f}_ent_mean_short.warning 126.92:128.08\n"; + print "${f}_ent_mean_short.critical 126.5:128.5\n"; + print "${f}_ent_mean_short.info Short term mean\n"; + + print "${f}_ent_mean_long.label Long term\n"; + if (defined $warn{$f}{'Ent8'}) { + print "${f}_ent_mean_long.warning 127.481:127.519\n"; + print "${f}_ent_mean_long.critical 127.0:128.0\n"; + } + print "${f}_ent_mean_long.info Long term mean\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.pi_error_$f\n"; + print "graph_title BitBabbler $_ Monte Carlo test (24-bit)\n"; + if (defined $warn{$f}{'Ent8'}) { + print "graph_args --base 1000\n"; # Don't inherit parent args + } else { + print "graph_args --base 1000 HRULE:-0.03#bbbbff HRULE:0.03#bbbbff\n"; + } + print "graph_vlabel % error calculating Pi\n"; + print "graph_scale no\n"; + print "graph_printf %6.4lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the error in computing the value of " + . "Pi using the 'Monte Carlo Method'. Consecutive sequences of " + . "24 bits are taken as X and Y coordinates inside a square. " + . "Since a circle inscribed in that square occupies Pi/4 of its " + . "area, then a uniformly distributed set of random points should " + . "fall inside or outside the radius of the circle with a ratio " + . "that when multiplied by 4 gives an approximation for Pi. The " + . "short term result is a test of the most recent 500,000 samples. " + . "The long term result is computed over all samples generated since " + . "the process being queried began. The results are graphed as the " + . "percentage of error relative to the real value of Pi. This test " + . "is relatively slow to converge on an accurate estimation, but a " + . "sustained or persistently diverging inaccuracy in the estimation " + . "would indicate a systemic error in the uniformity of the sample " + . 
"values.\n"; + + print "${f}_ent_pi_error_short.label Short term\n"; + print "${f}_ent_pi_error_short.warning -0.97:0.97\n"; + print "${f}_ent_pi_error_short.critical -2.0:2.0\n"; + print "${f}_ent_pi_error_short.info Short term error percentage\n"; + + print "${f}_ent_pi_error_long.label Long term\n"; + if (defined $warn{$f}{'Ent8'}) { + print "${f}_ent_pi_error_long.warning -0.03:0.03\n"; + print "${f}_ent_pi_error_long.critical -1.0:1.0\n"; + } + print "${f}_ent_pi_error_long.info Long term error percentage\n"; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.autocorr16_$f\n"; + print "graph_title BitBabbler $_ serial correlation (16-bit)\n"; + if (defined $warn{$f}{'Ent16'}) { + print "graph_args --base 1000\n"; # Don't inherit parent args + } else { + print "graph_args --base 1000 HRULE:-0.00008#bbbbff HRULE:0.00008#bbbbff\n"; + } + print "graph_vlabel Serial correlation coefficient\n"; + print "graph_scale yes\n"; + print "graph_printf %7.3lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the autocorrelation coefficient for " + . "a lag of 1 over the sequence of samples. This gives a measure " + . "of the extent to which each sample is related to the previous one. " + . "A perfectly predictable stream will converge on a result of 1.0, " + . "and a perfectly unpredictable one will converge on a result of 0." + . "The short term result is a test of the 100 million most recently " + . "generated samples. The long term result is computed over all " + . "samples generated since the process being queried began. " + . "A sustained divergence away from 0 or values close to +/- 1 " + . "indicate a problem that ought to be investigated.\n"; + + print "${f}_ent16_autocorr_short.label Short term\n"; + print "${f}_ent16_autocorr_short.warning -0.00044:0.00044\n"; + print "${f}_ent16_autocorr_short.critical -0.005:0.005\n"; + print "${f}_ent16_autocorr_short.info Short term serial correlation\n"; + + print "${f}_ent16_autocorr_long.label Long term\n"; + if (defined $warn{$f}{'Ent16'}) { + print "${f}_ent16_autocorr_long.warning -0.00011:0.00011\n"; + print "${f}_ent16_autocorr_long.critical -0.001:0.001\n"; + } + print "${f}_ent16_autocorr_long.info Long term serial correlation\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_ent.autocorr_$f\n"; + print "graph_title BitBabbler $_ serial correlation (8-bit)\n"; + if (defined $warn{$f}{'Ent8'}) { + print "graph_args --base 1000\n"; # Don't inherit parent args + } else { + print "graph_args --base 1000 HRULE:-0.0002#bbbbff HRULE:0.0002#bbbbff\n"; + } + print "graph_vlabel Serial correlation coefficient\n"; + print "graph_scale yes\n"; + print "graph_printf %7.3lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the autocorrelation coefficient for " + . "a lag of 1 over the sequence of samples. This gives a measure " + . "of the extent to which each sample is related to the previous one. " + . "A perfectly predictable stream will converge on a result of 1.0, " + . "and a perfectly unpredictable one will converge on a result of 0." + . "The short term result is a test of the 500,000 most recently " + . "generated samples. The long term result is computed over all " + . "samples generated since the process being queried began. " + . "A sustained divergence away from 0 or values close to +/- 1 " + . 
"indicate a problem that ought to be investigated.\n"; + + print "${f}_ent_autocorr_short.label Short term\n"; + print "${f}_ent_autocorr_short.warning -0.0078:0.0078\n"; + print "${f}_ent_autocorr_short.critical -0.009:0.009\n"; + print "${f}_ent_autocorr_short.info Short term serial correlation\n"; + + print "${f}_ent_autocorr_long.label Long term\n"; + if (defined $warn{$f}{'Ent8'}) { + print "${f}_ent_autocorr_long.warning -0.00025:0.00025\n"; + print "${f}_ent_autocorr_long.critical -0.005:0.005\n"; + } + print "${f}_ent_autocorr_long.info Long term serial correlation\n"; + } + +} #}}} + +sub report_ent_values(@) +{ #{{{ + + print "multigraph bb_ent\n"; + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; + + if (defined $ent) { + print "${f}_ent_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; + print "${f}_ent_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; + } else { + print "${f}_ent_entropy_short.value U\n"; + print "${f}_ent_entropy_long.value U\n"; + } + } + + + for my $n ('', '16') { + + my $e = $n ? 'Ent16' : 'Ent8'; + + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; + + print "multigraph bb_ent.chisq${n}_$f\n"; + + if (defined $ent) { + print "${f}_ent${n}_chisq_short.value $ent->{'Short'}{'Current'}{'Chisq'}\n"; + print "${f}_ent${n}_chisq_long.value $ent->{'Long'}{'Current'}{'Chisq'}\n"; + } else { + print "${f}_ent${n}_chisq_short.value U\n"; + print "${f}_ent${n}_chisq_long.value U\n"; + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; + + print "multigraph bb_ent.entropy${n}_$f\n"; + + if (defined $ent) { + print "${f}_ent${n}_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; + print "${f}_ent${n}_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; + print "${f}_ent${n}_minentropy_short.value $ent->{'Short'}{'Current'}{'MinEntropy'}\n"; + print "${f}_ent${n}_minentropy_long.value $ent->{'Long'}{'Current'}{'MinEntropy'}\n"; + } else { + print "${f}_ent${n}_entropy_short.value U\n"; + print "${f}_ent${n}_entropy_long.value U\n"; + print "${f}_ent${n}_minentropy_short.value U\n"; + print "${f}_ent${n}_minentropy_long.value U\n"; + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; + + print "multigraph bb_ent.mean${n}_$f\n"; + + if (defined $ent) { + print "${f}_ent${n}_mean_short.value $ent->{'Short'}{'Current'}{'Mean'}\n"; + print "${f}_ent${n}_mean_long.value $ent->{'Long'}{'Current'}{'Mean'}\n"; + } else { + print "${f}_ent${n}_mean_short.value U\n"; + print "${f}_ent${n}_mean_long.value U\n"; + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; + + print "multigraph bb_ent.autocorr${n}_$f\n"; + + if (defined $ent) { + print "${f}_ent${n}_autocorr_short.value $ent->{'Short'}{'Current'}{'Autocorr'}\n"; + print "${f}_ent${n}_autocorr_long.value $ent->{'Long'}{'Current'}{'Autocorr'}\n"; + } else { + print "${f}_ent${n}_autocorr_short.value U\n"; + print "${f}_ent${n}_autocorr_long.value U\n"; + } + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; + + print "multigraph bb_ent.pi_error_$f\n"; + + if (defined $ent) { + print "${f}_ent_pi_error_short.value $ent->{'Short'}{'Current'}{'Pi-error'}\n"; + print "${f}_ent_pi_error_long.value $ent->{'Long'}{'Current'}{'Pi-error'}\n"; + } else { + 
print "${f}_ent_pi_error_short.value U\n"; + print "${f}_ent_pi_error_long.value U\n"; + } + } + +} #}}} + + +sub report_fips_pass_config(@) +{ #{{{ + + print "multigraph bb_fips_pass\n"; + print "graph_title BitBabbler FIPS 140-2 pass run length\n"; + print "graph_vlabel Consecutive tests without failure\n"; + print "graph_scale no\n"; + print "graph_printf %6.0lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the run length between FIPS 140-2 test " + . "failures. A correctly working system should expect to see failure of " + . "the FIPS 140-2 tests about once in every 1250 blocks tested on average." + . " Occasional runs of much longer than that can be reasonably expected, " + . "with a run of 17500 or longer expected about once in 1.2 million tests " + . "(about 3.5TB of samples). A sustained lack of failures would indicate " + . "a problem that ought to be investigated.\n"; + + for (@_) { + my $f = clean_fieldname($_); + + print "${f}_pass_avg_short.label $_\n"; + print "${f}_pass_avg_short.info Short term average run of tests without failure\n"; + } + + + print "multigraph bb_fips_pass.longest\n"; + print "graph_title BitBabbler FIPS 140-2 longest pass run\n"; + print "graph_scale no\n"; + print "graph_printf %6.0lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the longest run of consecutive blocks " + . "without a FIPS 140-2 test failure, since the process being queried " + . "began. A run of 17500 or longer is expected about once in 1.2 million " + . "blocks tested (about 3.5TB of samples), but runs longer than that are " + . "not impossible, just increasingly rare. The average rate graph is a " + . "better measure of correct operation than this one, but consistently " + . "unusual results for the peak run length would be something that ought " + . "to be investigated more closely.\n"; + + for (@_) { + my $f = clean_fieldname($_); + + print "${f}_pass_max.label $_\n"; + print "${f}_pass_max.info Longest run of tests without failure\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_fips_pass.qa_$f\n"; + print "graph_title BitBabbler $_ FIPS 140-2 pass run length\n"; + print "graph_scale no\n"; + print "graph_printf %6.0lf\n"; + print "graph_category system\n"; + + print "${f}_pass_avg_short.label Short term average\n"; + print "${f}_pass_avg_short.warning 20000\n"; + print "${f}_pass_avg_short.info Average run of tests without failure\n"; + + print "${f}_pass_avg_long.label Long term average\n"; + print "${f}_pass_avg_long.warning 20000\n"; + print "${f}_pass_avg_long.info Average run of tests without failure\n"; + } + +} #}}} + +sub report_fips_pass_values(@) +{ #{{{ + + print "multigraph bb_fips_pass\n"; + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + print "${f}_pass_avg_short.value " + . ($fips ? $fips->{'Result'}{'PassRuns'}{'Short'} : "U") . "\n"; + } + + print "multigraph bb_fips_pass.longest\n"; + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + print "${f}_pass_max.value " + . ($fips ? $fips->{'Result'}{'PassRuns'}{'Peak'} : "U") . 
"\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + print "multigraph bb_fips_pass.qa_$f\n"; + + if (defined $fips) { + print "${f}_pass_avg_short.value $fips->{'Result'}{'PassRuns'}{'Short'}\n"; + print "${f}_pass_avg_long.value $fips->{'Result'}{'PassRuns'}{'Long'}\n"; + } else { + print "${f}_pass_avg_short.value U\n"; + print "${f}_pass_avg_long.value U\n"; + } + } + +} #}}} + + +sub report_fips_fail_config(@) +{ #{{{ + + print "multigraph bb_fips_fail\n"; + print "graph_title BitBabbler FIPS 140-2 testing\n"; + print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; + print "graph_scale no\n"; + print "graph_printf %6.4lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the long and short term failure rates " + . "for the FIPS 140-2 tests on each source. The short term average " + . "tracks a window of the last 1000 tests. A correctly working system " + . "should expect to converge on just under 0.8 failures per thousand as " + . "the long term trend, with with the short term average varying from 0 " + . "with occasional peaks over 5 (as the rare, but not quite infinitely " + . "improbable, rough upper bound). A sustained short term rate greater " + . "than that would indicate a systemic failure.\n"; + + my $first = 1; + + for (@_) { + my $f = clean_fieldname($_); + + print "${f}_l.label $_\n"; + print "${f}_l.graph no\n"; + print "${f}_l.line -0.829:bbbbbb\n" if $first; + print "${f}_l.info Long term average rate of failures\n"; + + print "${f}_s.label $_\n"; + print "${f}_s.negative ${f}_l\n"; + print "${f}_s.line 0.829:bbbbbb:Expected average rate\n" if $first; + print "${f}_s.info Short term rolling average rate of failures\n"; + + $first = 0; + } + + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_fips_fail.qa_$f\n"; + print "graph_title BitBabbler $_ average FIPS 140-2 failure rate\n"; + print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; + print "graph_scale no\n"; + print "graph_printf %6.4lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the long and short term failure rates " + . "for the FIPS 140-2 tests. The short term average tracks a window " + . "of the last 1000 tests. A correctly working system should expect " + . "to converge on around 0.8 failures per thousand (of any test) as " + . "the long term trend, with with the short term average varying from " + . "0 with occasional peaks over 5 (as the rare, but not infinitely " + . "improbable, rough upper bound). A sustained short term rate greater " + . "than that would indicate a systemic failure. The expected (long term) " + . 
"rates of failure for each individual test are indicated below.\n"; + + print "${f}_l.label Failure rate\n"; + print "${f}_l.graph no\n"; + print "${f}_l.line -0.829:bbbbbb\n"; + + print "${f}_s.label Failure rate\n"; + print "${f}_s.negative ${f}_l\n"; + print "${f}_s.line 0.829:bbbbbb:Expected average rate\n"; + print "${f}_s.info Failure of any test\n"; + print "${f}_s.warning 5.5\n"; + print "${f}_s.critical 10.0\n"; + + + print "${f}_monl.label Monobit\n"; + print "${f}_monl.graph no\n"; + + print "${f}_mons.label Monobit\n"; + print "${f}_mons.negative ${f}_monl\n"; + print "${f}_mons.info Expect 0.104 per 1000\n"; + + + print "${f}_pokl.label Poker\n"; + print "${f}_pokl.graph no\n"; + + print "${f}_poks.label Poker\n"; + print "${f}_poks.negative ${f}_pokl\n"; + print "${f}_poks.info Expect 0.099 per 1000\n"; + + + print "${f}_runl.label Runs\n"; + print "${f}_runl.graph no\n"; + + print "${f}_runs.label Runs\n"; + print "${f}_runs.negative ${f}_runl\n"; + print "${f}_runs.info Expect 0.328 per 1000\n"; + + + print "${f}_lrl.label Long run\n"; + print "${f}_lrl.graph no\n"; + + print "${f}_lrs.label Long run\n"; + print "${f}_lrs.negative ${f}_lrl\n"; + print "${f}_lrs.info Expect 0.298 per 1000\n"; + + + print "${f}_repl.label Repetition\n"; + print "${f}_repl.graph no\n"; + + print "${f}_reps.label Repetition\n"; + print "${f}_reps.negative ${f}_repl\n"; + print "${f}_reps.info Expect to be very rare\n"; + } + + for (@_) { + my $f = clean_fieldname($_); + + print "multigraph bb_fips_fail.peak_$f\n"; + print "graph_title BitBabbler $_ peak FIPS 140-2 failure rate\n"; + print "graph_vlabel Max failure rate (per 1000 tests)\n"; + print "graph_scale no\n"; + print "graph_printf %6.4lf\n"; + print "graph_category system\n"; + print "graph_info This graph shows the worst case failure rates for the " + . "FIPS 140-2 tests since the process we are querying began. These " + . "are the peak values seen as the short term average over a window " + . "of the last 1000 tests.\n"; + + print "${f}_p.label Failure rate\n"; + print "${f}_p.info Failure of any test\n"; + + print "${f}_monp.label Monobit\n"; + print "${f}_pokp.label Poker\n"; + print "${f}_runp.label Runs\n"; + print "${f}_lrp.label Long run\n"; + print "${f}_repp.label Repetition\n"; + } + +} #}}} + +sub report_fips_fail_values(@) +{ #{{{ + + print "multigraph bb_fips_fail\n"; + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + if (defined $fips) { + print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; + } else { + print "${f}_l.value U\n"; + print "${f}_s.value U\n"; + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + print "multigraph bb_fips_fail.qa_$f\n"; + + if (defined $fips) { + print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; + + print "${f}_monl.value " . $fips->{'Monobit'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_mons.value " . $fips->{'Monobit'}{'FailRate'}{'Short'} * 1000 . "\n"; + + print "${f}_pokl.value " . $fips->{'Poker'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_poks.value " . $fips->{'Poker'}{'FailRate'}{'Short'} * 1000 . "\n"; + + print "${f}_runl.value " . $fips->{'Runs'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_runs.value " . 
$fips->{'Runs'}{'FailRate'}{'Short'} * 1000 . "\n"; + + print "${f}_lrl.value " . $fips->{'Long run'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_lrs.value " . $fips->{'Long run'}{'FailRate'}{'Short'} * 1000 . "\n"; + + print "${f}_repl.value " . $fips->{'Repetition'}{'FailRate'}{'Long'} * 1000 . "\n"; + print "${f}_reps.value " . $fips->{'Repetition'}{'FailRate'}{'Short'} * 1000 . "\n"; + } else { + print "${f}_l.value U\n"; + print "${f}_s.value U\n"; + + print "${f}_monl.value U\n"; + print "${f}_mons.value U\n"; + + print "${f}_pokl.value U\n"; + print "${f}_poks.value U\n"; + + print "${f}_runl.value U\n"; + print "${f}_runs.value U\n"; + + print "${f}_lrl.value U\n"; + print "${f}_lrs.value U\n"; + + print "${f}_repl.value U\n"; + print "${f}_reps.value U\n"; + } + } + + for (@_) { + my $f = clean_fieldname($_); + my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; + + print "multigraph bb_fips_fail.peak_$f\n"; + + if (defined $fips) { + print "${f}_p.value " . $fips->{'Result'}{'FailRate'}{'Peak'} * 1000 . "\n"; + print "${f}_monp.value " . $fips->{'Monobit'}{'FailRate'}{'Peak'} * 1000 . "\n"; + print "${f}_pokp.value " . $fips->{'Poker'}{'FailRate'}{'Peak'} * 1000 . "\n"; + print "${f}_runp.value " . $fips->{'Runs'}{'FailRate'}{'Peak'} * 1000 . "\n"; + print "${f}_lrp.value " . $fips->{'Long run'}{'FailRate'}{'Peak'} * 1000 . "\n"; + print "${f}_repp.value " . $fips->{'Repetition'}{'FailRate'}{'Peak'} * 1000 . "\n"; + } else { + print "${f}_p.value U\n"; + print "${f}_monp.value U\n"; + print "${f}_pokp.value U\n"; + print "${f}_runp.value U\n"; + print "${f}_lrp.value U\n"; + print "${f}_repp.value U\n"; + } + } + +} #}}} + + +sub report_config() +{ #{{{ + + my $persist = $ENV{'persist_devices'} || "no"; + my @sources; + + @sources = restore_state() if $persist eq "yes"; + + eval { + get_ids(); + @sources = unique_list(@sources, @{$json->[2]}); + }; + + if (exists $ENV{'always_ignore'}) { + my %ignore; + my @remains; + + @ignore{split(' ',$ENV{'always_ignore'})} = (); + + for (@sources) { + push(@remains, $_) unless exists $ignore{$_}; + } + + @sources = @remains; + } + + save_state(@sources) if $persist eq "yes"; + + @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; + + report_bitrate_config(@sources); + report_ent_config(@sources); + report_fips_pass_config(@sources); + report_fips_fail_config(@sources); + +} #}}} + +sub report_values() +{ #{{{ + + get_stats(); + + my @sources; + + if (($ENV{'persist_devices'} || "") eq "yes") { + @sources = restore_state(); + } else { + @sources = keys %{$json->[2]}; + + if (exists $ENV{'always_ignore'}) { + my %ignore; + my @remains; + + @ignore{split(' ',$ENV{'always_ignore'})} = (); + + for (@sources) { + push(@remains, $_) unless exists $ignore{$_}; + } + + @sources = @remains; + } + } + + @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; + + report_bitrate_values(@sources); + report_ent_values(@sources); + report_fips_pass_values(@sources); + report_fips_fail_values(@sources); + +} #}}} + + +if (!defined $ARGV[0]) { + report_values(); +} +elsif ($ARGV[0] eq "config") { + report_config(); +} +elsif ($ARGV[0] eq "autoconf") { + # If the package providing this is installed, we presume you're going to + # want it enabled if munin-node is also installed. We could make this a + # bit more nuanced if this script is ever installed on a lot of systems + # where that isn't likely to be true. 
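As a cross-check, the figures quoted in the graph_info strings above are mutually consistent (a sketch only; the 2,500-byte block size assumes the usual 20,000-bit FIPS 140-2 test block, which is not stated in this hunk):

    0.104 + 0.099 + 0.328 + 0.298 = 0.829 failures per 1000 blocks
        (the 0.829 reference line drawn above, treating Repetition and
         simultaneous failures as negligible; roughly one failure in
         every 1200 - 1250 blocks)
    (1 - 1/1250)^17500 ~ e^-14 ~ 8e-7
        (a pass run of 17500 blocks or more about once per 1.2 million runs)
    1.2e6 runs * ~1250 blocks/run * 2500 bytes/block ~ 3.75 TB
        (in the region of the "about 3.5TB of samples" quoted above)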
+ print "yes\n"; +} + +# vi:sts=4:sw=4:et:foldmethod=marker diff -Nru bit-babbler-0.7/private_setup.h.in bit-babbler-0.8/private_setup.h.in --- bit-babbler-0.7/private_setup.h.in 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/private_setup.h.in 2018-02-07 23:56:52.000000000 +0000 @@ -190,6 +190,9 @@ /* Define to the version of this package. */ #undef PACKAGE_VERSION +/* Set the default to use for the seedd control socket */ +#undef SEEDD_CONTROL_SOCKET + /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS diff -Nru bit-babbler-0.7/src/bbcheck.cpp bit-babbler-0.8/src/bbcheck.cpp --- bit-babbler-0.7/src/bbcheck.cpp 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/src/bbcheck.cpp 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. -// Copyright 2014 - 2017, Ron +// Copyright 2014 - 2018, Ron #include "private_setup.h" @@ -15,7 +15,8 @@ using BitB::BitBabbler; using BitB::QA::Ent8; using BitB::QA::BitRuns; -using BitB::StrToUL; +using BitB::StrToU; +using BitB::StrToScaledU; using BitB::StrToScaledUL; using BitB::StrToScaledD; using BitB::Log; @@ -264,7 +265,7 @@ Result::Vector m_results; - static size_t DecrementBitrate( size_t rate ) + static unsigned DecrementBitrate( unsigned rate ) { return 30000000 / (30000000 / rate + 1); } @@ -280,7 +281,7 @@ unsigned fold = b.GetFolding(); size_t bs = m_options.block_size; size_t len = m_options.test_len * (1u << fold); - unsigned sec = len * 8 / bbo.bitrate; + unsigned sec = unsigned(len * 8 / bbo.bitrate); unsigned min = sec / 60; size_t e8short_len = m_options.test_len; @@ -385,6 +386,9 @@ Test::Handle h = static_cast( p ); + // Drop the 'virtual handle' from the ctor, we have a real one now. + h->Unref(); + try { h->run_test_thread(); } @@ -404,6 +408,23 @@ using BitB::SystemError; + // Bump the refcount until the thread is started, otherwise we + // may lose a race with this Test being released by the caller + // before the thread can take its handle from the raw pointer. + // Think of it as a virtual Handle passed with pthread_create. + // + // In practice, this isn't actually a problem in the current code + // because the only time the Test might be destroyed before the + // thread has run its course is if we're crash diving our way out + // after getting an early termination signal, when no unwinding + // will be done anyway. But conceptually the problem is real in + // this class, so handle it correctly in case future use changes. + Ref(); + + // We don't need to Unref() if this fails, because we'll throw + // and it will never have been constructed to be destroyed ... + // That assumes this method is only ever called from the ctor, + // which currently is true. 
int ret = pthread_create( &m_threadid, BitB::GetDefaultThreadAttr(), test_thread, this ); if( ret ) @@ -504,7 +525,7 @@ printf("Per device options:\n"); printf(" --latency=ms Override the USB latency timer\n"); printf(" -f, --fold=n Set the amount of entropy folding\n"); - printf(" --enable=mask Select a subset of the generators\n"); + printf(" --enable-mask=mask Select a subset of the generators\n"); printf(" --limit-max-xfer Limit the transfer chunk size to 16kB\n"); printf("\n"); printf("Report bugs to support@bitbabbler.org\n"); @@ -524,9 +545,9 @@ enum { LATENCY_OPT, - ENABLE_OPT, - NOCOLOUR_OPT, + ENABLEMASK_OPT, LIMIT_MAX_XFER, + NOCOLOUR_OPT, VERSION_OPT }; @@ -539,7 +560,7 @@ { "block-size", required_argument, NULL, 'B' }, { "latency", required_argument, NULL, LATENCY_OPT }, { "fold", required_argument, NULL, 'f' }, - { "enable", required_argument, NULL, ENABLE_OPT }, + { "enable-mask", required_argument, NULL, ENABLEMASK_OPT }, { "limit-max-xfer", no_argument, NULL, LIMIT_MAX_XFER }, { "no-colour", no_argument, NULL, NOCOLOUR_OPT }, { "all-results", no_argument, NULL, 'A' }, @@ -591,14 +612,14 @@ { opt_testoptions.bitrate_min = opt_testoptions.bitrate_max = - BitBabbler::RealBitrate( StrToScaledD(optarg) ); + BitBabbler::RealBitrate( unsigned(StrToScaledD(optarg)) ); } else { opt_testoptions.bitrate_min = - BitBabbler::RealBitrate( StrToScaledD(r.substr(0,n)) ); + BitBabbler::RealBitrate( unsigned(StrToScaledD(r.substr(0,n))) ); opt_testoptions.bitrate_max = - BitBabbler::RealBitrate( StrToScaledD(r.substr(n+1)) ); + BitBabbler::RealBitrate( unsigned(StrToScaledD(r.substr(n+1))) ); } break; } @@ -608,12 +629,12 @@ break; case 'B': - opt_testoptions.block_size = StrToScaledUL( optarg, 1024 ); + opt_testoptions.block_size = StrToScaledU( optarg, 1024 ); break; case LATENCY_OPT: { - unsigned latency = StrToUL( optarg, 10 ); + unsigned latency = StrToU( optarg, 10 ); if( device_options.empty() ) default_options.latency = latency; @@ -625,7 +646,7 @@ case 'f': { - unsigned fold = StrToUL( optarg, 10 ); + unsigned fold = StrToU( optarg, 10 ); if( device_options.empty() ) default_options.fold = fold; @@ -635,9 +656,9 @@ break; } - case ENABLE_OPT: + case ENABLEMASK_OPT: { - unsigned mask = StrToUL( optarg ); + unsigned mask = StrToU( optarg ); if( device_options.empty() ) default_options.enable_mask = mask; diff -Nru bit-babbler-0.7/src/bbctl.cpp bit-babbler-0.8/src/bbctl.cpp --- bit-babbler-0.7/src/bbctl.cpp 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/src/bbctl.cpp 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. 
-// Copyright 2014 - 2017, Ron +// Copyright 2014 - 2018, Ron #include "private_setup.h" @@ -8,22 +8,18 @@ #include +#include #include -#if EM_PLATFORM_POSIX - #define DEFAULT_CONTROL_SOCK "/var/run/bit-babbler/seedd.socket" -#else - #define DEFAULT_CONTROL_SOCK "tcp:localhost:56789" -#endif - - using BitB::Json; using BitB::ClientSock; using BitB::QA::Ent8; using BitB::QA::Ent16; -using BitB::StrToUL; +using BitB::StrToU; +using BitB::StrToScaledU; using BitB::StrToScaledUL; +using BitB::Error; using BitB::Log; using BitB::stringprintf; using std::string; @@ -48,6 +44,7 @@ printf(" -S, --stats Report general QA statistics\n"); printf(" -c, --control-socket=path The service socket to query\n"); printf(" -V, --log-verbosity=n Change the logging verbosity\n"); + printf(" --waitfor=dev:n:r:max Wait for a device to pass some number of bytes\n"); printf(" -v, --verbose Enable verbose output\n"); printf(" -?, --help Show this help message\n"); printf(" --version Print the program version\n"); @@ -56,20 +53,88 @@ printf("\n"); } + +struct WaitFor +{ //{{{ + + typedef std::list< WaitFor > List; + + std::string deviceid; + size_t bytes; + size_t retry_ms; + size_t timeout_ms; + + WaitFor( const std::string &arg ) + : retry_ms( 1000 ) + , timeout_ms( 0 ) + { //{{{ + + // Parse the options from a string of the form: + // device:bytes:retry_ms:timeout_ms + // Where device and bytes are mandatory. + + size_t n = arg.find( ':' ); + + if( n == string::npos ) + throw Error( _("No byte count given in --waitfor=%s"), arg.c_str() ); + + deviceid = arg.substr( 0, n ); + ++n; + + try { + size_t n2 = arg.find( ':', n ); + + if( n2 == string::npos ) + { + bytes = StrToScaledUL( arg.substr(n), 1024 ); + return; + } + + bytes = StrToScaledUL( arg.substr(n, n2 - n), 1024 ); + n = n2 + 1; + n2 = arg.find( ':', n ); + + if( n2 == string::npos ) + { + retry_ms = StrToScaledUL( arg.substr(n) ); + goto done; + } + + retry_ms = StrToScaledUL( arg.substr(n, n2 - n) ); + n = n2 + 1; + timeout_ms = StrToScaledUL( arg.substr(n) ); + + done: + if( retry_ms < 1 ) + throw Error( _("Retry time must be >= 1ms in --waitfor=%s"), + arg.c_str() ); + } + catch( const std::exception &e ) + { + throw Error( _("Invalid --waitfor argument '%s': %s"), + arg.c_str(), e.what() ); + } + + } //}}} + +}; //}}} + + int main( int argc, char *argv[] ) { try { - unsigned opt_scan = 0; - unsigned opt_bin_count = 0; - unsigned opt_bin_freq = 0; - unsigned opt_bit_runs = 0; - unsigned opt_stats = 0; - unsigned opt_first = 65536; - unsigned opt_last = 65536; - unsigned opt_log_level = unsigned(-1); - string opt_deviceid; - string opt_controlsock = DEFAULT_CONTROL_SOCK; + unsigned opt_scan = 0; + unsigned opt_bin_count = 0; + unsigned opt_bin_freq = 0; + unsigned opt_bit_runs = 0; + unsigned opt_stats = 0; + unsigned opt_first = 65536; + unsigned opt_last = 65536; + unsigned opt_log_level = unsigned(-1); + string opt_deviceid; + string opt_controlsock = SEEDD_CONTROL_SOCKET; + WaitFor::List opt_wait; enum { @@ -77,6 +142,7 @@ BINCOUNT16_OPT, FIRST_OPT, LAST_OPT, + WAITFOR_OPT, VERSION_OPT }; @@ -94,6 +160,7 @@ { "stats", no_argument, NULL, 'S' }, { "control-socket", required_argument, NULL, 'c' }, { "log-verbosity", required_argument, NULL, 'V' }, + { "waitfor", required_argument, NULL, WAITFOR_OPT }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, '?' 
}, { "version", no_argument, NULL, VERSION_OPT }, @@ -139,14 +206,14 @@ break; case FIRST_OPT: - opt_first = StrToScaledUL( optarg ); + opt_first = StrToScaledU( optarg ); if( opt_last == 65536 ) opt_last = 0; break; case LAST_OPT: - opt_last = StrToScaledUL( optarg ); + opt_last = StrToScaledU( optarg ); if( opt_first == 65536 ) opt_first = 0; break; @@ -164,7 +231,11 @@ break; case 'V': - opt_log_level = StrToUL( optarg, 10 ); + opt_log_level = StrToU( optarg, 10 ); + break; + + case WAITFOR_OPT: + opt_wait.push_back( WaitFor(optarg) ); break; case 'v': @@ -246,6 +317,66 @@ } //}}} + while( ! opt_wait.empty() ) + { //{{{ + + const WaitFor &w = opt_wait.front(); + size_t elapsed; + + if( w.timeout_ms ) + Log<1>( _("Waiting up to %zu ms for %zu good bytes from %s\n"), + w.timeout_ms, w.bytes, w.deviceid.c_str() ); + else + Log<1>( _("Waiting for %zu good bytes from %s\n"), + w.bytes, w.deviceid.c_str() ); + + for( elapsed = 0; w.timeout_ms == 0 || elapsed < w.timeout_ms; + elapsed += w.retry_ms ) + { + client.SendRequest( "[\"ReportStats\",1,\"" + w.deviceid + "\"]" ); + + Json::Handle json = client.Read(); + + Log<4>("read reply: %s\n", json->JSONStr().c_str() ); + + if( json[0]->String() == "ReportStats" ) + { + Json::Data::Handle stats = json[2]->Get( w.deviceid ); + + if( ! stats ) + throw Error( _("No statistics available for device '%s'"), + w.deviceid.c_str() ); + + unsigned long long passed = stats["QA"]["BytesPassed"]-> + As(); + if( passed >= w.bytes ) + { + Log<1>( _("Have %llu good bytes from %s in %zums\n"), + passed, w.deviceid.c_str(), elapsed ); + goto done; + } + + Log<3>( _("Have %llu good bytes from %s in %zums (waiting for %zu)\n"), + passed, w.deviceid.c_str(), elapsed, w.bytes ); + } else { + + // Possibly we should throw here too, but since this should + // never happen, assume it's a glitch and just try again. + Log<0>( "Unrecognised reply to ReportStats request\n" ); + } + + usleep( useconds_t( w.retry_ms * 1000 ) ); + } + + throw Error( _("Timeout after %zums waiting for %zu bytes from %s\n"), + elapsed, w.bytes, w.deviceid.c_str() ); + + done: + opt_wait.pop_front(); + + } //}}} + + if( opt_bin_freq ) { //{{{ @@ -392,8 +523,10 @@ for( Json::MemberList::iterator si = sources.begin(), se = sources.end(); si != se; ++si ) { - unsigned long long analysed = stats[*si]["QA"]["BytesAnalysed"]; - unsigned long long passed = stats[*si]["QA"]["BytesPassed"]; + unsigned long long analysed = stats[*si]["QA"]["BytesAnalysed"]-> + As(); + unsigned long long passed = stats[*si]["QA"]["BytesPassed"]-> + As(); BitB::QA::FIPS fips( stats[*si]["FIPS"] ); printf( "\nsource: %s\n", si->c_str() ); diff -Nru bit-babbler-0.7/src/seedd.cpp bit-babbler-0.8/src/seedd.cpp --- bit-babbler-0.7/src/seedd.cpp 2017-06-19 04:01:17.000000000 +0000 +++ bit-babbler-0.8/src/seedd.cpp 2018-02-07 23:56:52.000000000 +0000 @@ -1,5 +1,5 @@ // This file is distributed as part of the bit-babbler package. 
-// Copyright 2012 - 2017, Ron +// Copyright 2012 - 2018, Ron #ifndef _REENTRANT #error "seedd requires pthread support" @@ -7,6 +7,7 @@ #include "private_setup.h" +#include #include #include #include @@ -18,22 +19,20 @@ #include -#if EM_PLATFORM_POSIX - #define DEFAULT_CONTROL_SOCK "/var/run/bit-babbler/seedd.socket" -#else - #define DEFAULT_CONTROL_SOCK "tcp:localhost:56789" -#endif - - using BitB::BitBabbler; using BitB::Pool; using BitB::SocketSource; using BitB::ControlSock; using BitB::CreateControlSocket; using BitB::SecretSink; -using BitB::StrToUL; +using BitB::StrToU; +using BitB::StrToScaledU; using BitB::StrToScaledUL; using BitB::StrToScaledD; +using BitB::afterfirst; +using BitB::beforefirst; +using BitB::stringprintf; +using BitB::Error; using BitB::SystemError; using BitB::Log; @@ -47,18 +46,22 @@ printf("Options:\n"); printf(" -s, --scan Scan for available devices\n"); printf(" --shell-mr Output a machine readable list of devices\n"); + printf(" -C, --config=file Read configuration options from a file\n"); printf(" -i, --device-id=id Read from only the selected device(s)\n"); printf(" -b, --bytes=n Send n bytes to stdout\n"); + printf(" -o, --stdout Send entropy to stdout\n"); printf(" -d, --daemon Run as a background daemon\n"); printf(" -k, --kernel Feed entropy to the kernel\n"); printf(" -u, --udp-out=host:port Provide a UDP socket for entropy output\n"); - printf(" -o, --stdout Send entropy to stdout\n"); - printf(" -P, --pool-size=n Size of the entropy pool\n"); - printf(" -G, --group-size=g:n Size of a single pool group\n"); printf(" -c, --control-socket=path Where to create the control socket\n"); printf(" --socket-group=grp Grant group access to the control socket\n"); - printf(" --watch=path:ms:bs:n Monitor an external device\n"); + printf(" --ip-freebind Allow sockets to be bound to dynamic interfaces\n"); + printf(" -P, --pool-size=n Size of the entropy pool\n"); + printf(" --kernel-device=path Where to feed entropy to the OS kernel\n"); printf(" --kernel-refill=sec Max time in seconds before OS pool refresh\n"); + printf(" -G, --group-size=g:n Size of a single pool group\n"); + printf(" --watch=path:ms:bs:n Monitor an external device\n"); + printf(" --gen-conf Output a config file using the options passed\n"); printf(" -v, --verbose Enable verbose output\n"); printf(" -?, --help Show this help message\n"); printf(" --version Print the program version\n"); @@ -68,12 +71,12 @@ printf(" --latency=ms Override the USB latency timer\n"); printf(" -f, --fold=n Set the amount of entropy folding\n"); printf(" -g, --group=n The pool group to add the device to\n"); - printf(" --enable=mask Select a subset of the generators\n"); + printf(" --enable-mask=mask Select a subset of the generators\n"); printf(" --idle-sleep=init:max Tune the rate of pool refresh when idle\n"); printf(" --suspend-after=ms Set the threshold for USB autosuspend\n"); printf(" --low-power Convenience preset for idle and suspend\n"); - printf(" --no-qa Don't drop blocks that fail QA checking\n"); printf(" --limit-max-xfer Limit the transfer chunk size to 16kB\n"); + printf(" --no-qa Don't drop blocks that fail QA checking\n"); printf("\n"); printf("Report bugs to support@bitbabbler.org\n"); printf("\n"); @@ -82,7 +85,7 @@ #if EM_PLATFORM_POSIX -void WriteCompletion( void *p ) +static void WriteCompletion( void *p ) { pthread_t *t = static_cast( p ); pthread_kill( *t, SIGRTMIN ); @@ -94,7 +97,7 @@ static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER; static int done_waiting = 0; -void 
WriteCompletion( void *p ) +static void WriteCompletion( void *p ) { (void)p; BitB::ScopedMutex lock( &wait_mutex ); @@ -104,38 +107,645 @@ #endif + +// Configuration options, imported from file(s) and/or the command line. +class Config : public BitB::IniData +{ //{{{ +private: + + // The last --device-id passed on the command line, which any + // subsequent per-device options there should be applied to. + std::string m_curdev; + Validator::Handle m_validator; + + + // Option validator for unsigned number values in any base. + static void UnsignedValue( const std::string &option, const std::string &value ) + { //{{{ + + try { + StrToU( value ); + } + catch( const std::exception &e ) + { + throw Error( _("Option '%s' expected integer: %s"), + option.c_str(), e.what() ); + } + + } //}}} + + // Option validator for base-10 unsigned integer values. + static void UnsignedBase10Value( const std::string &option, const std::string &value ) + { //{{{ + + try { + StrToU( value, 10 ); + } + catch( const std::exception &e ) + { + throw Error( _("Option '%s' expected decimal integer: %s"), + option.c_str(), e.what() ); + } + + } //}}} + + // Option validator for base-10 unsigned integer values, optionally scaled by a suffix. + static void ScaledUnsignedValue( const std::string &option, const std::string &value ) + { //{{{ + + try { + StrToScaledUL( value ); + } + catch( const std::exception &e ) + { + throw Error( _("Option '%s' expected decimal integer: %s"), + option.c_str(), e.what() ); + } + + } //}}} + + // Option validator for decimal fraction values, optionally scaled by a suffix. + static void ScaledFloatValue( const std::string &option, const std::string &value ) + { //{{{ + + try { + StrToScaledD( value ); + } + catch( const std::exception &e ) + { + throw Error( _("Option '%s' expected decimal value: %s"), + option.c_str(), e.what() ); + } + + } //}}} + + // Validate Sections and Options. + // We don't exhaustively validate all the option values here, mostly we just + // want to catch invalid section and option names, but there's no reason not + // to do basic sanity checking of the easy ones at this stage too. + void validate() + { //{{{ + + if( ! m_validator ) + { + // We may have multiple (or no) config files to validate, + // so create this once the first time that it's needed. 
+ m_validator = new Validator; + + + // [Service] section options + Validator::OptionList::Handle service_opts = new Validator::OptionList; + + service_opts->AddTest( "daemon", Validator::OptionWithoutValue ) + ->AddTest( "kernel", Validator::OptionWithoutValue ) + ->AddTest( "udp-out", Validator::OptionWithValue ) + ->AddTest( "control-socket", Validator::OptionWithValue ) + ->AddTest( "socket-group", Validator::OptionWithValue ) + ->AddTest( "ip-freebind", Validator::OptionWithoutValue ) + ->AddTest( "verbose", UnsignedValue ); + + m_validator->Section( "Service", Validator::SectionNameEquals, service_opts ); + + + // [Pool] section options + Validator::OptionList::Handle pool_opts = new Validator::OptionList; + + pool_opts->AddTest( "size", ScaledUnsignedValue ) + ->AddTest( "kernel-device", Validator::OptionWithValue ) + ->AddTest( "kernel-refill", UnsignedBase10Value ); + + m_validator->Section( "Pool", Validator::SectionNameEquals, pool_opts ); + + + // [PoolGroup:] section options + Validator::OptionList::Handle poolgroup_opts = new Validator::OptionList; + + poolgroup_opts->AddTest( "size", ScaledUnsignedValue ); + + m_validator->Section( "PoolGroup:", Validator::SectionNamePrefix, poolgroup_opts ); + + + // [Devices] and [Device:] section options + Validator::OptionList::Handle device_opts = new Validator::OptionList; + + device_opts->AddTest( "bitrate", ScaledFloatValue ) + ->AddTest( "latency", UnsignedBase10Value ) + ->AddTest( "fold", UnsignedBase10Value ) + ->AddTest( "group", UnsignedBase10Value ) + ->AddTest( "enable-mask", UnsignedValue ) + ->AddTest( "idle-sleep", Validator::OptionWithValue ) + ->AddTest( "suspend-after", ScaledUnsignedValue ) + ->AddTest( "low-power", Validator::OptionWithoutValue ) + ->AddTest( "limit-max-xfer", Validator::OptionWithoutValue ) + ->AddTest( "no-qa", Validator::OptionWithoutValue ); + + m_validator->Section( "Devices", Validator::SectionNameEquals, device_opts ); + m_validator->Section( "Device:", Validator::SectionNamePrefix, device_opts ); + + + // [Watch:] section options + Validator::OptionList::Handle watch_opts = new Validator::OptionList; + + watch_opts->AddTest( "path", Validator::OptionWithValue ) + ->AddTest( "delay", ScaledUnsignedValue ) + ->AddTest( "block-size", ScaledUnsignedValue ) + ->AddTest( "max-bytes", ScaledUnsignedValue ); + + m_validator->Section( "Watch:", Validator::SectionNamePrefix, watch_opts ); + } + + m_validator->Validate( *this ); + + } //}}} + + + // Implementation detail to handle the 'low-power' option, which is set as + // a (per)Device option, implicitly setting the global Pool kernel-refill + // time too. We need to check this when exporting Pool options, even if + // there was no explict [Pool] section otherwise defined. + void check_pool_low_power_option( Pool::Options &p ) const + { //{{{ + + if( HasOption("Devices", "low-power") ) + { + p.kernel_refill_time = 3600; + } + else + { + const Sections &s = GetSections("Device:"); + + for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) + { + if( i->second->HasOption("low-power") ) + { + p.kernel_refill_time = 3600; + break; + } + } + } + + } //}}} + + // Implementation detail for extracting the per-device options from either + // the global [Devices] section or an individual [Device:] definition (with + // the global [Devices] options used as defaults for it unless overridden). 
+ BitBabbler::Options + get_device_options( const std::string §ion, + const std::string &device_id = std::string(), + const BitBabbler::Options &defaults = BitBabbler::Options() ) const + { //{{{ + + BitBabbler::Options bbo = defaults; + Section::Handle s = GetSection( section ); + std::string opt; + + if( ! device_id.empty() ) + bbo.id = device_id; + + try { + opt = "bitrate"; + if( s->HasOption( opt ) ) + bbo.bitrate = unsigned(StrToScaledD( s->GetOption(opt) )); + + opt = "latency"; + if( s->HasOption( opt ) ) + bbo.latency = StrToU( s->GetOption(opt), 10 ); + + opt = "fold"; + if( s->HasOption( opt ) ) + bbo.fold = StrToU( s->GetOption(opt), 10 ); + + opt = "group"; + if( s->HasOption( opt ) ) + bbo.group = StrToU( s->GetOption(opt), 10 ); + + opt = "enable-mask"; + if( s->HasOption( opt ) ) + bbo.enable_mask = StrToU( s->GetOption(opt) ); + + opt = "low-power"; + if( s->HasOption( opt ) ) + { + bbo.SetIdleSleep( "100:0" ); + bbo.suspend_after = 10000; + } + + opt = "suspend-after"; + if( s->HasOption( opt ) ) + bbo.suspend_after = StrToScaledU( s->GetOption(opt) ); + + opt = "no-qa"; + if( s->HasOption( opt ) ) + bbo.no_qa = true; + + opt = "limit-max-xfer"; + if( s->HasOption( opt ) ) + bbo.chunksize = 16384; + + opt = "idle-sleep"; + if( s->HasOption( opt ) ) + bbo.SetIdleSleep( s->GetOption(opt) ); + } + catch( const std::exception &e ) + { + throw Error( _("Failed to apply [%s] option '%s': %s"), + section.c_str(), opt.c_str(), e.what() ); + } + + return bbo; + + } //}}} + + +public: + + // We only need a trivial default constructor at present. + Config() {} + + + // Update the current state with (additional) options from an INI file. + // As with command line options, where some option setting is duplicated, + // the last one applied will override any seen previously. + void ImportFile( const char *path ) + { //{{{ + + char buf[65536]; + std::string data; + size_t n; + FILE *f = fopen( path, "r" ); + + if( ! f ) + throw SystemError( _("Failed to open config file '%s'"), path ); + + while(( n = fread( buf, 1, sizeof(buf), f ) )) + data.append( buf, n ); + + fclose( f ); + + try { + UpdateWith( data ); + validate(); + } + catch( const std::exception &e ) + { + throw Error( _("Failed to import config from '%s': %s"), + path, e.what() ); + } + + } //}}} + + + // Export entropy Pool configuration options. + Pool::Options GetPoolOptions() const + { //{{{ + + Pool::Options p; + std::string opt; + + try { + if( HasSection("Pool") ) + { + Section::Handle s = GetSection("Pool"); + + opt = "size"; + if( s->HasOption( opt ) ) + p.pool_size = StrToScaledUL( s->GetOption(opt), 1024 ); + + opt = "kernel-device"; + if( s->HasOption( opt ) ) + p.kernel_device = s->GetOption(opt); + + // This one is set implicitly to 3600 if any device uses the + // low-power option, unless it is explicitly set directly. + opt = "kernel-refill"; + if( s->HasOption( opt ) ) + p.kernel_refill_time = StrToU( s->GetOption(opt), 10 ); + else + check_pool_low_power_option( p ); + } + else + { + opt = "kernel-refill (low-power)"; + check_pool_low_power_option( p ); + } + } + catch( const std::exception &e ) + { + throw Error( _("Failed to apply [Pool] option '%s': %s"), + opt.c_str(), e.what() ); + } + + return p; + + } //}}} + + // Export a list of defined entropy Pool groups. 
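The low-power handling above is a convenience preset. As get_device_options() and check_pool_low_power_option() apply it, it is roughly equivalent to passing:

    --idle-sleep=100:0  --suspend-after=10000      (for the device(s) concerned, ms)
    --kernel-refill=3600                           (for the pool, seconds, unless
                                                    kernel-refill is set explicitly)

An explicit suspend-after or idle-sleep option in the same section still wins, since those options are applied after low-power in the code above.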
+ Pool::Group::Options::List GetPoolGroupOptions() const + { //{{{ + + const Sections &s = GetSections("PoolGroup:"); + Pool::Group::Options::List g; + + for( Sections::const_iterator i = s.begin(), + e = s.end(); i != e; ++i ) + { + std::string opt = i->first + ':' + GetOption(i->second, "size"); + g.push_back( Pool::Group::Options( opt.c_str() ) ); + } + + return g; + + } //}}} + + + // Add a [Device:] definition for a --device-id passed on the command line. + // And remember the last device added that way so that any subsequent + // per-device options on the command line will be applied to it too. + void AddDevice( const char *id ) + { //{{{ + + m_curdev = stringprintf( "Device:%s", id ); + + if( ! HasSection( m_curdev ) ) + AddSection( m_curdev ); + + } //}}} + + // Set (or override) a per-device option from the command line. + // If no --device-id has been passed yet, the option will be set in the + // global [Devices] section, otherwise it will be set for the specific + // [Device:] which was last requested. + void SetDeviceOption( const std::string &option, + const std::string &value = std::string() ) + { + AddOrUpdateOption( m_curdev.empty() ? "Devices" : m_curdev, option, value ); + } + + // Export the default [Devices] options to use for any devices which don't + // have an explicit [Device:] configuration of their own. + BitBabbler::Options GetDefaultDeviceOptions() const + { //{{{ + + if( HasSection("Devices") ) + return get_device_options("Devices"); + + return BitBabbler::Options(); + + } //}}} + + // Export a list of the individual [Device:] configuration options for each + // device that has one configured for it. + BitBabbler::Options::List GetDeviceOptions() const + { //{{{ + + BitBabbler::Options::List bbol; + BitBabbler::Options default_options = GetDefaultDeviceOptions(); + const Sections &s = GetSections("Device:"); + + for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) + { + bbol.push_back( get_device_options( i->second->GetName(), + i->first, + default_options ) ); + } + + return bbol; + + } //}}} + + + // Add a new [Watch:] definition from options passed on the command line. + void AddWatch( const std::string &arg ) + { //{{{ + + using std::string; + + const Sections &s = GetSections("Watch:"); + unsigned next_watch = 0; + + // If there are numbered Watch sections, find the current largest number. + // If someone really uses a number larger than will fit in unsigned int, + // and mixes a config file with command line watches, they'll get what + // it is that they did to themselves. But unless they really have > 4G + // watches set, we will still probably find a safe number to use here + // even with some truncated value(s) in the mix. + // + // The alternative would be to just iterate next_watch from 0 until we + // find the first value which isn't a collision, but this way is probably + // nicer since it orders all command line watches after any defined with + // numeric identifiers in the config file. + for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) + { + try { + unsigned isnum = StrToU( i->first ); + + if( isnum >= next_watch ) + next_watch = isnum + 1; + } + catch( const std::exception& ) + { + // It's not an error for Watch identifiers to not be a number, + // we just don't take those into account when generating one + // for a watch specified on the command line. 
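AddDevice() and SetDeviceOption() above give command-line per-device options the same defaults-then-override structure as the config sections. A sketch, with placeholder serial numbers:

    seedd --fold=3 -i SERIAL_A --group=1 -i SERIAL_B --group=2 --low-power

Here fold=3 lands in the global [Devices] defaults (no --device-id has been seen yet), group=1 applies only to [Device:SERIAL_A], and group=2 together with low-power apply only to [Device:SERIAL_B].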
+ + //Log<0>( "AddWatch: '%s' is not a number\n", i->first.c_str() ); + } + } + + //Log<0>( "Next watch is %u\n", n ); + + + // Parse the options struct from a string of the form: + // path:delay:block_size:total_bytes + // where everything except the path portion is optional. + // + // This is similar to what is done in SecretSink::Options::ParseOptArg() + // except we don't normalise the numeric values here, we just keep them + // as the literal strings which were passed on the command line for now. + // They'll get converted to numeric types when actually used. + + Section::Handle sect = AddSection( stringprintf("Watch:%u", next_watch) ); + size_t n = arg.find(':'); + size_t n2; + + if( n == string::npos ) + { + AddOption( sect, "path", arg ); + return; + } + AddOption( sect, "path", arg.substr(0, n) ); + + ++n; + n2 = arg.find( ':', n ); + + if( n2 == string::npos ) + { + AddOption( sect, "delay", arg.substr(n) ); + return; + } + AddOption( sect, "delay", arg.substr(n, n2 - n) ); + + n = n2 + 1; + n2 = arg.find( ':', n ); + + if( n2 == string::npos ) + { + AddOption( sect, "block-size", arg.substr(n) ); + return; + } + AddOption( sect, "block-size", arg.substr(n, n2 - n) ); + + n = n2 + 1; + + AddOption( sect, "max-bytes", arg.substr(n) ); + + } //}}} + + // Export a list of the source Watches to enable. + SecretSink::Options::List GetWatchOptions() const + { //{{{ + + const Sections &s = GetSections("Watch:"); + SecretSink::Options::List w; + + for( Sections::const_iterator i = s.begin(), + e = s.end(); i != e; ++i ) + { + // Track which option we're applying, so that we + // can report its name if an exception is thrown. + std::string opt; + + try { + SecretSink::Options sso; + + opt = "path"; + if( i->second->HasOption( opt ) ) + sso.devpath = i->second->GetOption( opt ); + else + throw Error( _("No path defined to Watch") ); + + opt = "delay"; + if( i->second->HasOption( opt ) ) + sso.block_delay = StrToScaledUL( i->second->GetOption( opt ) ); + + opt = "block-size"; + if( i->second->HasOption( opt ) ) + sso.block_size = StrToScaledUL( i->second->GetOption( opt ), 1024 ); + + opt = "max-bytes"; + if( i->second->HasOption( opt ) ) + sso.bytes = StrToScaledUL( i->second->GetOption( opt ), 1024 ); + + w.push_back( sso ); + } + catch( const std::exception &e ) + { + throw Error( _("Failed to apply [Watch:%s] option '%s': %s"), + i->first.c_str(), opt.c_str(), e.what() ); + } + } + + return w; + + } //}}} + + + // Specialisation of IniData::INIStr() to output the expected sections in + // a logical (for users) and deterministic (if using a hashed map) order. + // This string may be saved and later passed to ImportFile() or Decode() + // to recreate the current configuration state. 
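Pulling together the section and option names the validator accepts, a minimal configuration sketch might look like the following. This is illustrative only: the device id and watch path are placeholders, and the option = value / bare-flag rendering assumes conventional INI syntax rather than anything confirmed by this hunk.

    [Service]
    kernel
    verbose = 1

    [Devices]
    fold = 3

    [Device:SERIAL_A]
    group = 1
    low-power

    [Watch:0]
    path = /path/to/external/source

A file in this shape can be read back with -C/--config, and the --gen-conf handling below emits the same structure from whatever options were given on the command line.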
+ std::string ConfigStr() const + { //{{{ + + std::string out; + Sections s = GetSections(), ss; + Sections::iterator i = s.find("Service"), e; + + // Output the Service section + if( i != s.end() ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i ); + } + + // Output the Pool section + i = s.find("Pool"); + if( i != s.end() ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i ); + } + + // Output the PoolGroup section(s) + ss = GetSections("PoolGroup:"); + for( i = ss.begin(), e = ss.end(); i != e; ++i ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i->second->GetName() ); + } + + // Output the Devices section + i = s.find("Devices"); + if( i != s.end() ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i ); + } + + // Output the Device section(s) + ss = GetSections("Device:"); + for( i = ss.begin(), e = ss.end(); i != e; ++i ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i->second->GetName() ); + } + + // Output the Watch section(s) + ss = GetSections("Watch:"); + for( i = ss.begin(), e = ss.end(); i != e; ++i ) + { + out.append( i->second->INIStr() + '\n' ); + s.erase( i->second->GetName() ); + } + + // Output whatever else is still left + for( i = s.begin(), e = s.end(); i != e; ++i ) + out.append( i->second->INIStr() + '\n' ); + + return out; + + } //}}} + +}; //}}} + + int main( int argc, char *argv[] ) { try { + Config conf; unsigned opt_scan = 0; size_t opt_bytes = 0; - unsigned opt_daemon = 0; - unsigned opt_kernel = 0; unsigned opt_stdout = 0; - std::string opt_controlsock; - std::string opt_socketgroup; - std::string opt_socket_source; - - Pool::Options pool_options; - Pool::Group::Options::List group_options; - BitBabbler::Options default_options; - BitBabbler::Options::List device_options; - SecretSink::Options::List watch_options; + int opt_v = 0; + bool opt_genconf = false; enum { SHELL_MR_OPT, - LATENCY_OPT, - ENABLE_OPT, - NOQA_OPT, + FREEBIND_OPT, SOCKET_GROUP_OPT, - WATCH_OPT, + KERNEL_DEVICE_OPT, KERNEL_REFILL_TIME_OPT, + LATENCY_OPT, + ENABLEMASK_OPT, IDLE_SLEEP_OPT, SUSPEND_AFTER_OPT, LOW_POWER_OPT, LIMIT_MAX_XFER, + NOQA_OPT, + WATCH_OPT, + GENERATE_CONFIG_OPT, VERSION_OPT }; @@ -143,28 +753,36 @@ { { "scan", no_argument, NULL, 's' }, { "shell-mr", no_argument, NULL, SHELL_MR_OPT }, + { "config", required_argument, NULL, 'C' }, { "device-id", required_argument, NULL, 'i' }, + { "bytes", required_argument, NULL, 'b' }, + { "stdout", no_argument, NULL, 'o' }, + { "daemon", no_argument, NULL, 'd' }, + { "kernel", no_argument, NULL, 'k' }, + { "ip-freebind", no_argument, NULL, FREEBIND_OPT }, + { "udp-out", required_argument, NULL, 'u' }, + { "control-socket", required_argument, NULL, 'c' }, + { "socket-group", required_argument, NULL, SOCKET_GROUP_OPT }, + + { "pool-size", required_argument, NULL, 'P' }, + { "kernel-device", required_argument, NULL, KERNEL_DEVICE_OPT }, + { "kernel-refill", required_argument, NULL, KERNEL_REFILL_TIME_OPT }, + { "group-size", required_argument, NULL, 'G' }, + { "bitrate", required_argument, NULL, 'r' }, { "latency", required_argument, NULL, LATENCY_OPT }, { "fold", required_argument, NULL, 'f' }, { "group", required_argument, NULL, 'g' }, - { "enable", required_argument, NULL, ENABLE_OPT }, + { "enable-mask", required_argument, NULL, ENABLEMASK_OPT }, { "idle-sleep", required_argument, NULL, IDLE_SLEEP_OPT }, { "suspend-after", required_argument, NULL, SUSPEND_AFTER_OPT }, { "low-power", no_argument, NULL, LOW_POWER_OPT }, - { "no-qa", no_argument, NULL, NOQA_OPT }, { 
"limit-max-xfer", no_argument, NULL, LIMIT_MAX_XFER }, - { "bytes", required_argument, NULL, 'b' }, - { "daemon", no_argument, NULL, 'd' }, - { "kernel", no_argument, NULL, 'k' }, - { "udp-out", required_argument, NULL, 'u' }, - { "stdout", no_argument, NULL, 'o' }, - { "pool-size", required_argument, NULL, 'P' }, - { "group-size", required_argument, NULL, 'G' }, - { "control-socket", required_argument, NULL, 'c' }, - { "socket-group", required_argument, NULL, SOCKET_GROUP_OPT }, + { "no-qa", no_argument, NULL, NOQA_OPT }, + { "watch", required_argument, NULL, WATCH_OPT }, - { "kernel-refill", required_argument, NULL, KERNEL_REFILL_TIME_OPT }, + + { "gen-conf", no_argument, NULL, GENERATE_CONFIG_OPT }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, '?' }, { "version", no_argument, NULL, VERSION_OPT }, @@ -176,7 +794,7 @@ for(;;) { //{{{ - int c = getopt_long( argc, argv, ":si:r:f:g:b:dku:oP:G:c:v?", + int c = getopt_long( argc, argv, ":sC:i:r:f:g:b:dku:oP:G:c:v?", long_options, &opt_index ); if( c == -1 ) break; @@ -191,181 +809,116 @@ opt_scan = 2; break; - case 'i': - { - BitBabbler::Options bbo = default_options; - - try { - bbo.id = optarg; - } - catch( const std::exception &e ) - { - fprintf( stderr, "%s: error, %s\n", argv[0], e.what() ); - return EXIT_FAILURE; - } - - device_options.push_back( bbo ); + case 'C': + conf.ImportFile( optarg ); break; - } - - case 'r': - { - unsigned bitrate = StrToScaledD( optarg ); - - if( device_options.empty() ) - default_options.bitrate = bitrate; - else - device_options.back().bitrate = bitrate; + case 'i': + conf.AddDevice( optarg ); break; - } - - case LATENCY_OPT: - { - unsigned latency = StrToUL( optarg, 10 ); - - if( device_options.empty() ) - default_options.latency = latency; - else - device_options.back().latency = latency; + case 'b': + opt_bytes = StrToScaledUL( optarg, 1024 ); break; - } - - case 'f': - { - unsigned fold = StrToUL( optarg, 10 ); - if( device_options.empty() ) - default_options.fold = fold; - else - device_options.back().fold = fold; + case 'o': + opt_stdout = 1; + break; + case 'd': + conf.AddOrUpdateOption( "Service", "daemon" ); break; - } - case 'g': - { - unsigned group = StrToUL( optarg, 10 ); + case 'k': + conf.AddOrUpdateOption( "Service", "kernel" ); + break; - if( device_options.empty() ) - default_options.group = group; - else - device_options.back().group = group; + case FREEBIND_OPT: + conf.AddOrUpdateOption( "Service", "ip-freebind" ); + break; + case 'u': + conf.AddOrUpdateOption( "Service", "udp-out", optarg ); break; - } case 'c': - opt_controlsock = optarg; + conf.AddOrUpdateOption( "Service", "control-socket", optarg ); break; case SOCKET_GROUP_OPT: - opt_socketgroup = optarg; + conf.AddOrUpdateOption( "Service", "socket-group", optarg ); break; - case ENABLE_OPT: - { - unsigned mask = StrToUL( optarg ); - - if( device_options.empty() ) - default_options.enable_mask = mask; - else - device_options.back().enable_mask = mask; - + case 'P': + conf.AddOrUpdateOption( "Pool", "size", optarg ); break; - } - case IDLE_SLEEP_OPT: - try { - if( device_options.empty() ) - default_options.SetIdleSleep( optarg ); - else - device_options.back().SetIdleSleep( optarg ); - } - catch( const std::exception &e ) - { - fprintf( stderr, "%s: error, %s\n", argv[0], e.what() ); - return EXIT_FAILURE; - } + case KERNEL_DEVICE_OPT: + conf.AddOrUpdateOption( "Pool", "kernel-device", optarg ); break; - case SUSPEND_AFTER_OPT: - if( device_options.empty() ) - default_options.suspend_after = StrToScaledUL( 
optarg ); - else - device_options.back().suspend_after = StrToScaledUL( optarg ); - + case KERNEL_REFILL_TIME_OPT: + conf.AddOrUpdateOption( "Pool", "kernel-refill", optarg ); break; - case LOW_POWER_OPT: - if( device_options.empty() ) - { - default_options.SetIdleSleep( "100:0" ); - default_options.suspend_after = 10000; - } else { - device_options.back().SetIdleSleep( "100:0" ); - device_options.back().suspend_after = 10000; - } - pool_options.kernel_refill_time = 3600; + case 'G': + { + std::string s( optarg ); + conf.AddOrUpdateOption( "PoolGroup:" + beforefirst(':', s), + "size", afterfirst(':', s) ); break; + } - case NOQA_OPT: - if( device_options.empty() ) - default_options.no_qa = true; - else - device_options.back().no_qa = true; - + case 'r': + conf.SetDeviceOption( "bitrate", optarg ); break; - case LIMIT_MAX_XFER: - if( device_options.empty() ) - default_options.chunksize = 16384; - else - device_options.back().chunksize = 16384; - + case LATENCY_OPT: + conf.SetDeviceOption( "latency", optarg ); break; - case 'b': - opt_bytes = StrToScaledUL( optarg, 1024 ); + case 'f': + conf.SetDeviceOption( "fold", optarg ); break; - case 'd': - opt_daemon = 1; - opt_kernel = 1; - BitB::opt_syslog = 1; + case 'g': + conf.SetDeviceOption( "group", optarg ); break; - case 'k': - opt_kernel = 1; + case ENABLEMASK_OPT: + conf.SetDeviceOption( "enable-mask", optarg ); break; - case 'u': - opt_socket_source = optarg; + case IDLE_SLEEP_OPT: + conf.SetDeviceOption( "idle-sleep", optarg ); break; - case 'o': - opt_stdout = 1; + case SUSPEND_AFTER_OPT: + conf.SetDeviceOption( "suspend-after", optarg ); break; - case 'P': - pool_options.pool_size = StrToScaledUL( optarg, 1024 ); + case LOW_POWER_OPT: + conf.SetDeviceOption( "low-power" ); break; - case KERNEL_REFILL_TIME_OPT: - pool_options.kernel_refill_time = StrToUL( optarg, 10 ); + case LIMIT_MAX_XFER: + conf.SetDeviceOption( "limit-max-xfer" ); break; - case 'G': - group_options.push_back( Pool::Group::Options( optarg ) ); + case NOQA_OPT: + conf.SetDeviceOption( "no-qa" ); break; case WATCH_OPT: - watch_options.push_back( SecretSink::Options::ParseOptArg( optarg ) ); + conf.AddWatch( optarg ); + break; + + case GENERATE_CONFIG_OPT: + opt_genconf = true; break; case 'v': - ++BitB::opt_verbose; + ++opt_v; break; case '?': @@ -375,6 +928,16 @@ argv[0], optopt); return EXIT_FAILURE; } + + // If we're generating a config, don't dump the usage to stdout + // under any circumstances and do return an EXIT_FAILURE code. + if( opt_genconf ) + { + fprintf(stderr, "%s: invalid option used, not generating config\n", + argv[0]); + return EXIT_FAILURE; + } + usage(); return EXIT_SUCCESS; @@ -391,15 +954,69 @@ } //}}} - #if EM_PLATFORM_POSIX + std::string notify_socket = BitB::GetSystemdNotifySocket(); + + // If we've been started by systemd in notify mode we need to stay in the + // foreground regardless of what config options we may have been passed. + if( ! notify_socket.empty() ) + conf.RemoveOption( "Service", "daemon" ); + + + // Just output a configuration file (based on the options passed) and exit. + if( opt_genconf ) + { //{{{ + + std::string cmd_line; + + for( int i = 0; i < argc; ++i ) + cmd_line.append( stringprintf(" %s", argv[i]) ); - if( BitB::opt_syslog ) - openlog( argv[0], LOG_PID, LOG_DAEMON ); + // We don't usually push the -v command line override into the config + // but do it here, because we want that in the generated config if used. 
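A usage sketch for the --gen-conf path handled above (the file name and device id are placeholders, not packaged defaults):

    seedd --gen-conf -k -i SERIAL_A --low-power > seedd.conf
        print a config file reflecting those options on stdout and exit
    seedd -C seedd.conf
        read the same options back from the file on a later run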
+        if( opt_v )
+            conf.AddOrUpdateOption( "Service", "verbose", stringprintf("%d", opt_v) );
+
+        printf( "# Generated configuration file for seedd(1), created %s using:\n"
+                "# %s\n%s\n",
+                BitB::timeprintf( "%F", BitB::GetWallTimeval() ).c_str(),
+                cmd_line.c_str(), conf.ConfigStr().c_str() );
-    if( opt_daemon && ! opt_scan )
+        return EXIT_SUCCESS;
+
+    } //}}}
+
+
+    // Pump up the volume (if asked to)
+    if( opt_v )
+        BitB::opt_verbose = opt_v;
+    else if ( conf.HasOption("Service", "verbose") )
+        BitB::opt_verbose = int(StrToU( conf.GetOption("Service", "verbose") ));
+
+    // And send it to syslog if we'll be running in the background.
+    if( conf.HasOption("Service", "daemon") && ! opt_scan )
+        BitB::SendLogsToSyslog( argv[0] );
+
+
+    if( ! notify_socket.empty() )
+        Log<4>( "NOTIFY_SOCKET='%s'\n", notify_socket.c_str() );
+
+    Log<2>( "Using configuration:\n%s", conf.ConfigStr().c_str() );
+
+    // Extract and (initially) sanity check these before going to
+    // the background if we're going to be running this as a daemon.
+    Pool::Options pool_options = conf.GetPoolOptions();
+    Pool::Group::Options::List group_options = conf.GetPoolGroupOptions();
+    SecretSink::Options::List watch_options = conf.GetWatchOptions();
+    BitBabbler::Options default_options = conf.GetDefaultDeviceOptions();
+    BitBabbler::Options::List device_options = conf.GetDeviceOptions();
+
+
+    #if EM_PLATFORM_POSIX
+
+    if( conf.HasOption("Service", "daemon") && ! opt_scan )
    {
        if( daemon(0,0) )
-            throw SystemError( _("seedd: Failed to fork daemon") );
+            throw SystemError( _("Failed to fork daemon") );
        umask( S_IWGRP | S_IROTH | S_IWOTH | S_IXOTH );
    }
@@ -407,7 +1024,12 @@
    BitB::BlockSignals();
    #else
-    (void)opt_daemon;
+
+    // We could implement support for this if/when needed, but it's less useful
+    // on systems where we don't sit in the background feeding the OS kernel.
+    if( conf.HasOption("Service", "daemon") && ! opt_scan )
+        throw Error( _("Daemon mode not supported on this platform.") );
+
    #endif
@@ -450,13 +1072,14 @@
    SocketSource::Handle ssrc;
-    if( ! opt_socket_source.empty() )
+    if( conf.HasOption("Service", "udp-out") )
    {
        opt_bytes = 0;
-        ssrc = new SocketSource( pool, opt_socket_source );
+        ssrc = new SocketSource( pool, conf.GetOption("Service", "udp-out"),
+                                 conf.HasOption("Service", "ip-freebind") );
    }
-    if( opt_kernel )
+    if( conf.HasOption("Service", "kernel") )
    {
        opt_bytes = 0;
        pool->FeedKernelEntropyAsync();
@@ -464,8 +1087,8 @@
    if( opt_stdout || opt_bytes )
    {
-        if( opt_bytes && opt_controlsock.empty() )
-            opt_controlsock = "none";
+        if( opt_bytes && ! conf.HasOption("Service", "control-socket") )
+            conf.AddOrUpdateOption( "Service", "control-socket", "none" );
    #if EM_PLATFORM_MSW
        setmode( STDOUT_FILENO, O_BINARY );
@@ -480,11 +1103,16 @@
        watch_sinks.push_back( new SecretSink( *i ) );
-    if( opt_controlsock.empty() )
-        opt_controlsock = DEFAULT_CONTROL_SOCK;
+    ControlSock::Handle ctl = CreateControlSocket( conf.GetOption( "Service", "control-socket",
+                                                                   SEEDD_CONTROL_SOCKET ),
+                                                   conf.GetOption( "Service", "socket-group",
+                                                                   std::string() ),
+                                                   conf.HasOption( "Service", "ip-freebind" ) );
-    ControlSock::Handle ctl = CreateControlSocket( opt_controlsock, opt_socketgroup );
+    // If we've been started by systemd in notify mode, then notify it ...
+    if( ! notify_socket.empty() )
+        BitB::SystemdNotify( "READY=1", notify_socket );
    #if EM_PLATFORM_POSIX
@@ -522,6 +1150,18 @@
    #endif
+
+    // If we've been started by systemd in notify mode, humour it again ...
+    //{{{
+    // This is mostly useless, but not entirely, because exiting this scope
+    // isn't the end of us yet, there's a bunch of shutdown still to be done
+    // including terminating threads in the unwinding which happens next.
+    // This really is still just the beginning of the end, some pathological
+    // worst case could keep us hanging on for longer than we expected to.
+    //}}}
+    if( ! notify_socket.empty() )
+        BitB::SystemdNotify( "STOPPING=1", notify_socket );
+
    return EXIT_SUCCESS;
 }
 BB_CATCH_ALL( 0, _("seedd fatal exception") )