diff -Nru cgreen-1.3.0/cmake/Modules/DefineCompilerFlags.cmake cgreen-1.6.3/cmake/Modules/DefineCompilerFlags.cmake --- cgreen-1.3.0/cmake/Modules/DefineCompilerFlags.cmake 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/DefineCompilerFlags.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -7,12 +7,27 @@ set (COMPILER_IS_CLANG TRUE) endif (${CMAKE_C_COMPILER_ID} MATCHES "Clang") +if (CGREEN_WITH_XML) + add_definitions(-DHAVE_XML_REPORTER=1) +endif (CGREEN_WITH_XML) + +if (CGREEN_WITH_LIBXML2) + add_definitions(-DHAVE_LIBXML2_REPORTER=1) +endif (CGREEN_WITH_LIBXML2) + if (UNIX) if (CMAKE_COMPILER_IS_GNUCC OR COMPILER_IS_CLANG) # add_compile_options(-Wall -Wextra -Wunused) # only since CMake 2.8.12, so... add_definitions(-Wall -Wextra -Wunused) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++98 -Weffc++") + if (CGREEN_WITH_LIBXML2) + # libxml2 headers depend on ICU library for Unicode support, + # but ICU headers do not even compile with C++ 98. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + else () + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++98") + endif (CGREEN_WITH_LIBXML2) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weffc++") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 -Wstrict-prototypes") if (CGREEN_INTERNAL_WITH_GCOV) @@ -22,8 +37,10 @@ endif (CGREEN_INTERNAL_WITH_GCOV) add_definitions(-D_REENTRANT) # for gmtime_r() - add_definitions(-D_XOPEN_SOURCE) # for popen() and pclose() - add_definitions(-D_XOPEN_SOURCE_EXTENDED) # for strdup(), which isn't part of C99 + if (NOT ${CMAKE_SYSTEM_NAME} MATCHES ".*OpenBSD.*") + add_definitions(-D_XOPEN_SOURCE) # for popen() and pclose() + add_definitions(-D_XOPEN_SOURCE_EXTENDED) # for strdup(), which isn't part of C99 + endif() add_definitions(-D__STDC_FORMAT_MACROS) # for PRI*PTR format macros, required by C99 if (NOT CYGWIN) @@ -42,11 +59,6 @@ if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug") add_definitions(-O) - else () - check_c_compiler_flag("-Og" WITH_OPTIMIZE_FOR_DEBUG) - if 
(WITH_OPTIMIZE_FOR_DEBUG) - add_definitions(-Og) - endif () endif () endif (CMAKE_COMPILER_IS_GNUCC OR COMPILER_IS_CLANG) endif (UNIX) diff -Nru cgreen-1.3.0/cmake/Modules/DefineRelativeFilePaths.cmake cgreen-1.6.3/cmake/Modules/DefineRelativeFilePaths.cmake --- cgreen-1.3.0/cmake/Modules/DefineRelativeFilePaths.cmake 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/DefineRelativeFilePaths.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,15 @@ +# Source: https://stackoverflow.com/questions/237542/getting-base-name-of-the-source-file-at-compile-time + +function (cmake_define_relative_file_paths SOURCES) + foreach (SOURCE IN LISTS SOURCES) + file ( + RELATIVE_PATH RELATIVE_SOURCE_PATH + ${PROJECT_SOURCE_DIR} ${SOURCE} + ) + + set_source_files_properties ( + ${SOURCE} PROPERTIES + COMPILE_DEFINITIONS FILENAME="${RELATIVE_SOURCE_PATH}" + ) + endforeach () +endfunction () diff -Nru cgreen-1.3.0/cmake/Modules/FindFLEX.cmake cgreen-1.6.3/cmake/Modules/FindFLEX.cmake --- cgreen-1.3.0/cmake/Modules/FindFLEX.cmake 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/FindFLEX.cmake 1970-01-01 00:00:00.000000000 +0000 @@ -1,134 +0,0 @@ -# - Find flex executable and provides a macro to generate custom build rules -# The module defines the following variables: -# FLEX_FOUND - true is flex executable is found -# FLEX_VERSION - the version of flex -# If flex is found on the system, the module provides the macro: -# FLEX_TARGET(Name FlexInput FlexOutput [COMPILE_FLAGS ]) -# which creates a custom command to generate the file from -# the file. If COMPILE_FLAGS option is specified, the next -# parameter is added to the flex command line. Name is an alias used to -# get details of this custom command. 
Indeed the macro defines the -# following variables: -# FLEX_${Name}_DEFINED - true is the macro ran successfully -# FLEX_${Name}_OUTPUTS - the source file generated by the custom rule, an -# alias for FlexOutput -# FLEX_${Name}_INPUT - the flex source file, an alias for ${FlexInput} -# -# Flex scanners oftenly use tokens defined by Bison: the code generated -# by Flex depends of the header generated by Bison. This module also -# defines a macro: -# ADD_FLEX_BISON_DEPENDENCY(FlexTarget BisonTarget) -# which adds the required dependency between a scanner and a parser -# where and are the first parameters of -# respectively FLEX_TARGET and BISON_TARGET macros. -# -# Example: -# FIND_PACKAGE(BISON) -# FIND_PACKAGE(FLEX) -# BISON_TARGET(MyParser parser.y ${PROJECT_BINARY_DIR}/parser.cpp -# FLEX_TARGET(MyScanner lexer.l ${PROJECT_BINARY_DIR}/lexer.cpp) -# ADD_FLEX_BISON_DEPENDENCY(MyScanner MyParser) -# - -# Copyright (c) 2006, Tristan Carel -# All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the University of California, Berkeley nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# $Id:: FindFLEX.cmake 3 2006-11-03 02:42:02Z ken $ - -SET(FLEX_FOUND FALSE) - -FIND_PROGRAM(FLEX_EXECUTABLE flex DOC "path to the flex executable") -MARK_AS_ADVANCED(FLEX_EXECUTABLE) - -FIND_LIBRARY(FL_LIBRARY NAMES fl - PATHS /usr/lib DOC "path to the fl library") -SET(FLEX_LIBRARIES ${FL_LIBRARY}) - -IF(FLEX_EXECUTABLE) - SET(FLEX_FOUND TRUE) - - EXECUTE_PROCESS(COMMAND ${FLEX_EXECUTABLE} --version - OUTPUT_VARIABLE FLEX_version_output - ERROR_VARIABLE FLEX_version_error - RESULT_VARIABLE FLEX_version_result - OUTPUT_STRIP_TRAILING_WHITESPACE) - IF(NOT ${FLEX_version_result} EQUAL 0) - MESSAGE(SEND_ERROR "Command \"${FLEX_EXECUTABLE} --version\" failed with output:\n${FLEX_version_error}") - ELSE(NOT ${FLEX_version_result} EQUAL 0) - STRING(REGEX REPLACE "^flex (.*)$" "\\1" - FLEX_VERSION "${FLEX_version_output}") - ENDIF(NOT ${FLEX_version_result} EQUAL 0) - - MACRO(FLEX_TARGET Name Input Output) - SET(FLEX_TARGET_usage "FLEX_TARGET( [COMPILE_FLAGS ]") - IF(${ARGC} GREATER 3) - IF(${ARGC} EQUAL 5) - IF("${ARGV3}" STREQUAL "COMPILE_FLAGS") - SET(FLEX_EXECUTABLE_opts "${ARGV4}") - SEPARATE_ARGUMENTS(FLEX_EXECUTABLE_opts) - ELSE("${ARGV3}" STREQUAL "COMPILE_FLAGS") - MESSAGE(SEND_ERROR ${FLEX_TARGET_usage}) - ENDIF("${ARGV3}" STREQUAL "COMPILE_FLAGS") - ELSE(${ARGC} EQUAL 5) - MESSAGE(SEND_ERROR ${FLEX_TARGET_usage}) - ENDIF(${ARGC} EQUAL 5) - ENDIF(${ARGC} GREATER 3) - ADD_CUSTOM_COMMAND(OUTPUT ${Output} - COMMAND ${FLEX_EXECUTABLE} 
${FLEX_EXECUTABLE_opts} -o${Output} ${Input} - DEPENDS ${Input} - COMMENT "[FLEX][${Name}] Building scanner with flex ${FLEX_VERSION}" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - - SET(FLEX_${Name}_DEFINED TRUE) - SET(FLEX_${Name}_OUTPUTS ${Output}) - SET(FLEX_${Name}_INPUT ${Input}) - SET(FLEX_${Name}_COMPILE_FLAGS ${FLEX_EXECUTABLE_opts}) - ENDMACRO(FLEX_TARGET) - - MACRO(ADD_FLEX_BISON_DEPENDENCY FlexTarget BisonTarget) - IF(NOT FLEX_${FlexTarget}_OUTPUTS) - MESSAGE(SEND_ERROR "Flex target `${FlexTarget}' does not exists.") - ENDIF(NOT FLEX_${FlexTarget}_OUTPUTS) - IF(NOT BISON_${BisonTarget}_OUTPUT_HEADER) - MESSAGE(SEND_ERROR "Bison target `${BisonTarget}' does not exists.") - ENDIF(NOT BISON_${BisonTarget}_OUTPUT_HEADER) - - SET_SOURCE_FILES_PROPERTIES(${FLEX_${FlexTarget}_OUTPUTS} - PROPERTIES OBJECT_DEPENDS ${BISON_${BisonTarget}_OUTPUT_HEADER}) - ENDMACRO(ADD_FLEX_BISON_DEPENDENCY) - -ENDIF(FLEX_EXECUTABLE) - -IF(NOT FLEX_FOUND) - IF(NOT FLEX_FIND_QUIETLY) - MESSAGE(STATUS "FLEX was not found.") - ELSE(NOT FLEX_FIND_QUIETLY) - IF(FLEX_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "FLEX was not found.") - ENDIF(FLEX_FIND_REQUIRED) - ENDIF(NOT FLEX_FIND_QUIETLY) -ENDIF(NOT FLEX_FOUND) - -# FindFLEX.cmake ends here diff -Nru cgreen-1.3.0/cmake/Modules/FindNm.cmake cgreen-1.6.3/cmake/Modules/FindNm.cmake --- cgreen-1.3.0/cmake/Modules/FindNm.cmake 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/FindNm.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,5 @@ +find_program(NM_EXECUTABLE nm) + +if (NM_EXECUTABLE) + set(NM_FOUND TRUE) +endif() diff -Nru cgreen-1.3.0/cmake/Modules/FindValgrind.cmake cgreen-1.6.3/cmake/Modules/FindValgrind.cmake --- cgreen-1.3.0/cmake/Modules/FindValgrind.cmake 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/FindValgrind.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,11 @@ +if (NOT Valgrind_FOUND) + + find_program(Valgrind_EXECUTABLE valgrind) + + include(FindPackageHandleStandardArgs) + 
find_package_handle_standard_args(Valgrind DEFAULT_MSG Valgrind_EXECUTABLE) + + set(Valgrind_FOUND ${Valgrind_FOUND} CACHE BOOL "Flag whether Valgrind package was found") + mark_as_advanced(Valgrind_FOUND Valgrind_EXECUTABLE) + +endif() diff -Nru cgreen-1.3.0/cmake/Modules/MacroAddValgrindTest.cmake cgreen-1.6.3/cmake/Modules/MacroAddValgrindTest.cmake --- cgreen-1.3.0/cmake/Modules/MacroAddValgrindTest.cmake 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/cmake/Modules/MacroAddValgrindTest.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,28 @@ +# - MACRO_ADD_VALGRIND_TEST() +# +# Calls add_test() with all the but if on Win32 or Cygwin also adds the +# directory where the Cgreen library is generated to the path so that it will +# be used when running the test +# +# @thoni56/Thomas Nilefalk 2015-09-13 + +macro (macro_add_valgrind_test) + if (Valgrind_FOUND) + set( + libname + ${CMAKE_FIND_LIBRARY_PREFIXES}${ARGN}${CMAKE_SHARED_LIBRARY_SUFFIX} + ) + add_test( + NAME valgrind_${libname} + COMMAND sh -c "LD_LIBRARY_PATH=build/src valgrind --leak-check=full tools/cgreen-runner ${CMAKE_CURRENT_BINARY_DIR}/${libname} 2>1&" + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + set_tests_properties( + valgrind_${libname} PROPERTIES + FAIL_REGULAR_EXPRESSION "(definitely|indirectly|possibly) lost: [1-9]" + ) + if (CYGWIN OR WIN32) + set_tests_properties(${ARGV1} PROPERTIES ENVIRONMENT PATH=${PROJECT_BINARY_DIR}/src:$ENV{PATH}) + endif () + endif () +endmacro(macro_add_valgrind_test) diff -Nru cgreen-1.3.0/CMakeLists.txt cgreen-1.6.3/CMakeLists.txt --- cgreen-1.3.0/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -1,5 +1,5 @@ # Required cmake version -cmake_minimum_required(VERSION 2.8.5) +cmake_minimum_required(VERSION 2.8.12) project(cgreen) @@ -11,13 +11,30 @@ find_package (Threads) +set(CGREEN_WITH_XML ON CACHE BOOL + "Add a simple XML report generator without external dependencies") 
+set(CGREEN_WITH_LIBXML2 ON CACHE BOOL + "Add an XML report generator which uses libxml2 for output formatting") + +if (CGREEN_WITH_LIBXML2) + find_package (LibXml2) + if (NOT LibXml2_FOUND) + set(CGREEN_WITH_LIBXML2 OFF) + endif (NOT LibXml2_FOUND) +endif (CGREEN_WITH_LIBXML2) + enable_testing() # global needed variables set(APPLICATION_NAME ${PROJECT_NAME}) + +# VERSION: +# NOTE: If you change version here, also change in +# include/cgreen/cgreen.h unless you write some code that +# automatically updates that... set(APPLICATION_VERSION_MAJOR "1") -set(APPLICATION_VERSION_MINOR "3") -set(APPLICATION_VERSION_PATCH "0") +set(APPLICATION_VERSION_MINOR "6") +set(APPLICATION_VERSION_PATCH "3") set(APPLICATION_VERSION ${APPLICATION_VERSION_MAJOR}.${APPLICATION_VERSION_MINOR}.${APPLICATION_VERSION_PATCH}${APPLICATION_VERSION_STATUS}) add_definitions(-DVERSION="${APPLICATION_VERSION}") @@ -25,6 +42,8 @@ set(LIBRARY_VERSION ${APPLICATION_VERSION_MAJOR}.${APPLICATION_VERSION_MINOR}.${APPLICATION_VERSION_PATCH}) set(LIBRARY_SOVERSION ${APPLICATION_VERSION_MAJOR}) + +# INSTALL: include(GNUInstallDirs) if(MSVC) @@ -33,6 +52,22 @@ set(CMAKE_INSTALL_LIBDIR ".") endif(MSVC) +# If OSX and using Homebrew use its install prefix +find_program(BREW brew) +if (BREW) + execute_process(COMMAND brew --prefix OUTPUT_VARIABLE CMAKE_INSTALL_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE) +endif() + +# COMPLETION: +set(BASHCOMPLETION_STATUS "Unavailable") +find_package(bash-completion QUIET) +if(BASH_COMPLETION_FOUND OR UNIX) + set(BASHCOMPLETION_STATUS "Available") + install(FILES tools/cgreen_completion.bash DESTINATION "${CMAKE_INSTALL_DATADIR}/bash-completion/completions" RENAME "cgreen-runner") + install(FILES tools/cgreen_completion.bash DESTINATION "${CMAKE_INSTALL_DATADIR}/bash-completion/completions" RENAME "cgreen-debug") +endif() + +# CMAKE MODULES: # where to look first for cmake modules, before ${CMAKE_ROOT}/Modules/ is checked set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules 
@@ -48,6 +83,7 @@ include(MacroAddPlugin) include(MacroCopyFile) +# GIT REVISION: # Generate gitrevision.h if Git is available and the .git directory is found. find_program(GIT_EXECUTABLE git DOC "Git version control") mark_as_advanced(GIT_EXECUTABLE) @@ -62,29 +98,34 @@ OUTPUT_VARIABLE GITDIR OUTPUT_STRIP_TRAILING_WHITESPACE ) -get_filename_component(GITDIR "${GITDIR}" ABSOLUTE) +if (GITDIR) + get_filename_component(GITDIR "${GITDIR}" ABSOLUTE) +endif() # config.h checks include(ConfigureChecks.cmake) configure_file(config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h) # check subdirectories +add_subdirectory(src) add_subdirectory(doc) add_subdirectory(include) -add_subdirectory(src) -if (UNIX OR MSYS) - # reflective runner only supported on UNIX/binutils platforms - add_subdirectory(tools) -endif(UNIX OR MSYS) -# add_subdirectory(samples) + +# Dependency on our own library so we can use CGREEN_LIBRARY in all subdirectories +if (CGREEN_WITH_STATIC_LIBRARY) + set(CGREEN_LIBRARY ${CGREEN_STATIC_LIBRARY}) +else () + set(CGREEN_LIBRARY ${CGREEN_SHARED_LIBRARY}) +endif() if (CGREEN_WITH_UNIT_TESTS) include(MacroAddUnitTest) include(MacroAddTest) + include(MacroAddValgrindTest) add_subdirectory(tests) if (UNIX OR MSYS) # reflective runner only supported on UNIX/binutils platforms - add_subdirectory(tools/tests) + add_subdirectory(tools) endif(UNIX OR MSYS) endif (CGREEN_WITH_UNIT_TESTS) diff -Nru cgreen-1.3.0/config.h.cmake cgreen-1.6.3/config.h.cmake --- cgreen-1.3.0/config.h.cmake 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/config.h.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -9,8 +9,6 @@ #cmakedefine LIBDIR "${LIBDIR}" #cmakedefine PLUGINDIR "${PLUGINDIR}" #cmakedefine SYSCONFDIR "${SYSCONFDIR}" -#cmakedefine BINARYDIR "${BINARYDIR}" -#cmakedefine SOURCEDIR "${SOURCEDIR}" /************************** HEADER FILES *************************/ diff -Nru cgreen-1.3.0/ConfigureChecks.cmake cgreen-1.6.3/ConfigureChecks.cmake --- cgreen-1.3.0/ConfigureChecks.cmake 
2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/ConfigureChecks.cmake 2023-10-03 15:16:52.000000000 +0000 @@ -11,6 +11,3 @@ set(LIBDIR ${LIB_INSTALL_DIR}) set(PLUGINDIR "${PLUGIN_INSTALL_DIR}-${LIBRARY_SOVERSION}") set(SYSCONFDIR ${SYSCONF_INSTALL_DIR}) - -set(BINARYDIR ${PROJECT_BINARY_DIR}) -set(SOURCEDIR ${PROJECT_SOURCE_DIR}) diff -Nru cgreen-1.3.0/contrib/cgreen-mocker/cgreen-mocker.py cgreen-1.6.3/contrib/cgreen-mocker/cgreen-mocker.py --- cgreen-1.3.0/contrib/cgreen-mocker/cgreen-mocker.py 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/contrib/cgreen-mocker/cgreen-mocker.py 2023-10-03 15:16:52.000000000 +0000 @@ -41,8 +41,10 @@ from pycparser.plyparser import ParseError from pycparser import c_parser, c_ast, parse_file, c_generator from functools import reduce +from packaging import version import sys import os +import pycparser # This is not required if you've installed pycparser into # your site-packages/ with setup.py @@ -98,7 +100,9 @@ print(" return %s(" % ("*" if self.is_return_struct_by_value(node) else ""), end="") print(generator.visit(node.type), end="") - if isinstance(node.type, c_ast.PtrDecl) or self.is_return_struct_by_value(node): + if version.parse(pycparser.__version__) <= version.parse('2.19') \ + and isinstance(node.type, c_ast.PtrDecl) \ + or self.is_return_struct_by_value(node): print(" *", end="") print(") ", end="") else: diff -Nru cgreen-1.3.0/contrib/cgreen-mocker/requirements.txt cgreen-1.6.3/contrib/cgreen-mocker/requirements.txt --- cgreen-1.3.0/contrib/cgreen-mocker/requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/contrib/cgreen-mocker/requirements.txt 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,3 @@ +pycparser +packaging + diff -Nru cgreen-1.3.0/contrib/cgreen-mocker/test/cgreen-mocker cgreen-1.6.3/contrib/cgreen-mocker/test/cgreen-mocker --- cgreen-1.3.0/contrib/cgreen-mocker/test/cgreen-mocker 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/contrib/cgreen-mocker/test/cgreen-mocker 
1970-01-01 00:00:00.000000000 +0000 @@ -1,272 +0,0 @@ -#!/usr/bin/env python -# ----------------------------------------------------------------- -# cgreen-mocker.py -# -# Create Cgreen mocks from extern declarations of functions, -# typically in a header file. -# -# Usage: -# cgreen-mocker.py { } -# -# : any 'cpp' directive but most useful is e.g. -# "-I " to ensure cpp finds files. -# -# : file with function declarations that you want -# to mock -# -# Simplistically adapted from pycparser example: func_defs.py -# -# Since it uses pycparser it will only handle C functions and you will -# probably need the pycparsers "fake_libc_include" to avoid parsing -# the whole world of libc headers. To use it, make a soft link with -# the name 'pycparser' in the directory you are running this from, or -# in the directory of 'cgreen-mocker' itself, to the top directory of -# the pycparser source, and cgreen-mocker will pick it up -# automatically. Or you can point to it using a command line -# 'cpp_directive' arg. -# -# Thanks to @gardenia for the pointer to pycparser! -# -# https://github.com/eliben/pycparser -# -# (C) 2016, Thomas Nilefalk -# -# Using pycparser for printing out all the functions defined in a -# C file. 
-# -# PyCParser - Copyright (C) 2008-2015, Eli Bendersky -# License: BSD -# ----------------------------------------------------------------- -from __future__ import print_function -from pycparser.plyparser import ParseError -from pycparser import c_parser, c_ast, parse_file, c_generator -from functools import reduce -import sys -import os - -# This is not required if you've installed pycparser into -# your site-packages/ with setup.py -sys.path.extend(['.', '..']) - -# Print on stderr - - -def eprint(*args, **kwargs): - print(*args, file=sys.stderr, **kwargs) - -# A visitor for FuncDef nodes that prints the -# Cgreen mock equivalent of the function - - -class FuncDefVisitor(c_ast.NodeVisitor): - def __init__(self, filename): - self._types = {} - self.filename = filename - - def visit_FuncDecl(self, node): - if node.coord.file == self.filename: - # Only consider definitions that are in the processed file - generator = c_generator.CGenerator() - try: - print(generator.visit(node), end="") - print(" { ") - self.should_return(node) - print("mock(%s);" % ", ".join(arg_list(node.args))) - print("}") - print() - except Exception as e: - print("ERROR: {} - Unexpected AST @ {}:{}:{}:".format(e, node.coord.file, - node.coord.line, node.coord.column)) - node.show() - return - - def visit_Typedef(self, node): - self._types[node.name] = { - 'is_pointer': isinstance(node.type, c_ast.PtrDecl), - } - if self._types[node.name]['is_pointer']: - self._types[node.name]['to_class'] = node.type.type.type.names - else: - self._types[node.name]['to_class'] = None - - def should_return(self, node): - generator = c_generator.CGenerator() - type = node.type - if is_double_decl(node): - print(" return unbox_double(", end="") - elif not is_void_decl(node): - print(" return %s(" % - ("*" if self.is_return_struct_by_value(node) else ""), end="") - print(generator.visit(node.type), end="") - if isinstance(node.type, c_ast.PtrDecl) or self.is_return_struct_by_value(node): - print(" *", end="") - 
print(") ", end="") - else: - print(" ", end="") - - def is_return_struct_by_value(self, node): - type = node.type - return not isinstance(type, c_ast.PtrDecl) and type.type.names[0] in self._types and not self._types[type.type.names[0]]['is_pointer'] - - def is_return_by_value_pointer(self, node): - type = node.type - return not isinstance(type, c_ast.PtrDecl) and self._types[type.type.names[0]]['is_pointer'] - - -def arg_list(args): - if args != None and len(args.params) > 0: - return [el for el in map(parameter_name_or_box_double, - filter(lambda x: not is_ellipsis_param(x), - args.params)) - if el is not None] - else: - return [] - - -def parameter_name_or_box_double(node): - if is_double_decl(node): - return "box_double({})".format(node.name) - else: - return node.name - - -def is_void_decl(node): - type = node.type - return isinstance(type, c_ast.TypeDecl) and type.type.names == ['void'] - - -def is_double_decl(node): - type = node.type - return isinstance(type, c_ast.TypeDecl) and type.type.names == ['double'] - - -def is_ellipsis_param(node): - return isinstance(node, c_ast.EllipsisParam) - - -def show_func_defs(args): - # Note that cpp is used. Provide a path to your own cpp or - # make sure one exists in PATH. - - pycparser_path = None - # Try to find a fake_libc - # In current directory? - if verbose: - eprint("Called in {0}".format( - os.path.abspath(os.path.dirname(sys.argv[0])))) - eprint("Looking for fake_lib in current directory...") - if os.path.isdir('pycparser'): - pycparser_path = r'./pycparser' - else: - this_script = os.path.abspath(__file__) - if verbose: - eprint( - "Looking for fake_lib in directory of script ({0})...".format(this_script)) - # Look in the directory of this script - while os.path.islink(this_script): - # If the script is a symlink, resolve it first, recursively... - # Note: can only handle absolute symlinks? 
- this_script = os.readlink(this_script) - if verbose: - eprint( - "Script was a symlink, resolving it to '{0}'...".format(this_script)) - if os.path.isdir(os.path.join(os.path.dirname(this_script), - 'pycparser')): - # Yes, there is a pycparser symlink here - pycparser_path = os.path.join(os.path.dirname(this_script), - 'pycparser') - - if pycparser_path: - pycparser_lib = reduce( - os.path.join, [pycparser_path, 'utils', 'fake_libc_include']) - if verbose: - print("/* Generated with cgreen-mocker and pycparser's fake_libc from %s */" % - (pycparser_path)) - elif verbose: - eprint("Not found") - - try: - options = [ - '-I'+pycparser_lib] if pycparser_path else [] - if add_gnuisms: - # And add some common GNUisms - options = options + [ - r'-D__gnuc_va_list(c)=', - r'-D__attribute__(x)=', - r'-D__extension__=', - r'-D__restrict=', - r'-D__inline=' - ] - if verbose: - eprint("Parsing with options = {0}".format(options)) - cpp_args = list(filter(None, options)) - ast = parse_file(args[-1], use_cpp=True, - cpp_args=cpp_args + args[0:-1]) - except ParseError as e: - print("ERROR: {} - C99 parse error".format(e)) - return - - print('/* -*- c -*-*/') # Suggest c-mode for Emacs - print('#include "%s"' % args[len(args)-1]) - print('#include ') - print() - v = FuncDefVisitor(args[-1]) - v.visit(ast) - - -def usage(): - print(""" -Usage: - cgreen-mocker.py { } - - : any 'cpp' directive but most useful are e.g. - "-I " to ensure cpp finds files and - "-D " to create an inline define - - : file with function declarations that you want - to mock - - Cgreen-mocker takes a header file and generates cgreen mocks for - all functions in it. It will print the generated mocks to standard - output so you can inspect it, or pipe it to a file that can be - compiled and linked with your tests. - - The mocker will only handle functions that are declared in the - header file you provide. This is based on the presumtion that the - header file represents functions in a unit. 
Aggregating functions - from multiple units into a single header for convenience is not - supported. Also the mocker cannot handle data declarations (yet?). - - If your header does not name some arguments you will not be able - to use those arguments in 'expect when' statements, of course. - - Cgreen-mocker will only generate mocks for the external functions - in the file you give as an argument, not those in included files. - - If cgreen-mocker encounters parse errors and they look like - gnu-isms you should get a copy of the source for pycparser (on - which cgreen-mocker is built). In it you will find a - 'fake_libc_include' which help. Create a symbolic link named - 'pycparser' that links to the root of pycparser source and - cgreen-mocker will find it itself. - - You can find pycparser at https://github.com/eliben/pycparser - -""") - - -if __name__ == "__main__": - if len(sys.argv) <= 1: - usage() - exit(-1) - if '-v' in sys.argv: - verbose = True - sys.argv.remove('-v') - else: - verbose = False - if '-gnu' in sys.argv: - add_gnuisms = True - sys.argv.remove('-gnu') - else: - add_gnuisms = False - show_func_defs(sys.argv[1:]) diff -Nru cgreen-1.3.0/contrib/completion/cgreen_bash_completion cgreen-1.6.3/contrib/completion/cgreen_bash_completion --- cgreen-1.3.0/contrib/completion/cgreen_bash_completion 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/contrib/completion/cgreen_bash_completion 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -#/usr/bin/env bash -# -# Contributed by Yavor Lulchev @RookieWookiee -# Improved a bit by @thoni56 - -# Thanks to https://stackoverflow.com/a/57243443/204658 -removeFromArray() { - arrayName="$1" - arrayNameAt="$arrayName"'[@]' - removeValue="$2" - mapfile -d '' -t "$arrayName" < <( - printf %s\\0 "${!arrayNameAt}" | grep -zvFx -- "$removeValue") -} - -_cgreen_runner_completion() -{ - local options libraries tests - options=("--colours" "--no-colours" "--xml" "--suite" "--verbose" "--no-run" "--help" " --version") - 
#echo options:"${options[@]}" > log - libraries=" $(\ls | grep -e '\b\.so\b' | tr '\n' ' ')" - tests="" - - # If a partial test is given we should match and only complete the matching tests - # Look for words in the command given so far - for word in ${COMP_WORDS[@]}; do - if echo $word | grep -q -E "\b\.so\b"; then - # If it was a library, check for tests in it - if test ! -f $word || test ! -x $word; then continue; fi - - local SUT="$(nm -f posix $word | grep -o -E 'CgreenSpec\w*?\b' | awk -F '__' '{ print $2 }' | uniq)" - local test_names=($(nm -f posix $word | grep -o -E 'CgreenSpec\w*?\b' | sed -e 's/CgreenSpec__[a-zA-Z0-9]\+\?__//' -e 's/__$//')) - - if test $SUT = "default" ; then - tests+=" $test_names" - else - local prefix="$SUT\\:" - tests+=" ${test_names[@]/#/$prefix}" - fi - fi - done - - # Remove all suggestions already used - # Strangely this only removes things from options, not libraries or tests - #echo "options:${options[@]}" >> log - #echo "libraries:${libraries[@]}" >> log - #echo "tests:${tests[@]}" >> log - for word in ${COMP_WORDS[@]}; do - echo \'$word\' >> log - removeFromArray options $word - removeFromArray libraries $word - removeFromArray tests $word - done - #echo "options:${options[@]}" >> log - #echo "libraries:${libraries[@]}" >> log - #echo "tests:${tests[@]}" >> log - - - COMPREPLY=($(compgen -W '$(printf "%s " ${options[@]} ${libraries[@]} ${tests[@]})' -- "${COMP_WORDS[$COMP_CWORD]}")) -} - -complete -o nosort -o dirnames -F _cgreen_runner_completion cgreen-runner -complete -o nosort -o dirnames -F _cgreen_runner_completion cgreen-debug diff -Nru cgreen-1.3.0/debian/cgreen1.install cgreen-1.6.3/debian/cgreen1.install --- cgreen-1.3.0/debian/cgreen1.install 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/cgreen1.install 2023-10-18 22:04:14.000000000 +0000 @@ -1,4 +1,6 @@ usr/bin/cgreen-runner usr/bin/cgreen-debug +usr/share/bash-completion/completions/cgreen-runner +usr/share/bash-completion/completions/cgreen-debug 
usr/share/man/man1/cgreen-runner.1 usr/share/man/man1/cgreen-debug.1 diff -Nru cgreen-1.3.0/debian/changelog cgreen-1.6.3/debian/changelog --- cgreen-1.3.0/debian/changelog 2020-11-12 05:21:15.000000000 +0000 +++ cgreen-1.6.3/debian/changelog 2023-10-18 22:04:36.000000000 +0000 @@ -1,3 +1,61 @@ +cgreen (1.6.3-1~ppa1) mantic; urgency=medium + + * No-change rebuild. + + -- Lucas Kanashiro Wed, 18 Oct 2023 19:04:36 -0300 + +cgreen (1.6.3-1) unstable; urgency=medium + + * Import new upstream release (Closes: #1038145) + * Remove patch files which are fixed by upstream + * Update symbols + + -- Gavin Lai (賴建宇) Tue, 03 Oct 2023 23:11:21 +0800 + +cgreen (1.6.2-1~exp2) experimental; urgency=medium + + * d/rules: + - Collect debug log when dh_auto_test fails + + -- Gavin Lai (賴建宇) Sun, 24 Sep 2023 22:14:51 +0800 + +cgreen (1.6.2-1~exp1) experimental; urgency=medium + + * Import new upstream release + * d/control: + - Bump Standards-Version to 4.6.2 + - Remove libbdf-dev from build-deps (Closes: #1010589) + - Add libxml2-dev to build-deps + - Add Recommends libcgreen1-dev in cgreen1 + * Update symbols + * Add a patch to fix man page error + + -- Gavin Lai (賴建宇) Tue, 15 Aug 2023 23:18:11 +0800 + +cgreen (1.5.1-1) unstable; urgency=medium + + * Import new upstream release + * Update symbols + * d/controls: + - Add libbfd-dev to build-deps + + -- SZ Lin (林上智) Sat, 16 Apr 2022 21:46:26 +0800 + +cgreen (1.4.1-1) unstable; urgency=medium + + * Import new upstream release + * d/control: + - Bump Standards-Version to 4.6.0.1 + * d/copyright: + - Update copyright content + * d/test: + - Mark test as superficial (Closes: #974443) + * Remove unneeded patches + * Add bash-completion files + * Update symbol table + + -- SZ Lin (林上智) Sun, 06 Mar 2022 20:24:51 +0800 + cgreen (1.3.0-2) unstable; urgency=medium [ Alvin Chen ] diff -Nru cgreen-1.3.0/debian/control cgreen-1.6.3/debian/control --- cgreen-1.3.0/debian/control 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/control 
2023-10-18 22:04:14.000000000 +0000 @@ -1,10 +1,10 @@ Source: cgreen Section: libs Priority: optional -Maintainer: Alvin Chen +Maintainer: Gavin Lai (賴建宇) Uploaders: SZ Lin (林上智) -Build-Depends: cmake, ruby-asciidoctor-pdf, debhelper-compat (= 13) -Standards-Version: 4.5.0 +Build-Depends: cmake, libxml2-dev, ruby-asciidoctor-pdf, debhelper-compat (= 13) +Standards-Version: 4.6.2 Homepage: https://github.com/cgreen-devs/cgreen Rules-Requires-Root: no Vcs-Browser: https://salsa.debian.org/debian/cgreen @@ -51,6 +51,7 @@ Section: utils Architecture: any Depends: ${misc:Depends}, ${shlibs:Depends} +Recommends: libcgreen1-dev Description: Cgreen Library - Library tool A modern unit test and mocking framework for C and C++. Cgreen features - fast build, clean code, highly portable diff -Nru cgreen-1.3.0/debian/copyright cgreen-1.6.3/debian/copyright --- cgreen-1.3.0/debian/copyright 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/copyright 2023-10-18 22:04:14.000000000 +0000 @@ -3,7 +3,7 @@ Source:https://github.com/cgreen-devs/cgreen Files: * -Copyright: 2006-2018, Cgreen Development Team and contributors +Copyright: 2006-2022, Cgreen Development Team and contributors License: ISC License: ISC @@ -24,7 +24,7 @@ Files: debian/* Copyright: 2020, Alvin Chen - 2020, SZ Lin (林上智) + 2020-2022, SZ Lin (林上智) License: ISC Files: contrib/cgreen-mocker/cgreen-mocker.py diff -Nru cgreen-1.3.0/debian/libcgreen1.symbols cgreen-1.6.3/debian/libcgreen1.symbols --- cgreen-1.3.0/debian/libcgreen1.symbols 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/libcgreen1.symbols 2023-10-18 22:04:14.000000000 +0000 @@ -54,6 +54,7 @@ count_tests@Base 1.2.0 create_begins_with_string_constraint@Base 1.2.0 create_breadcrumb@Base 1.2.0 + create_capture_parameter_constraint@Base 1.5.1 create_cdash_reporter@Base 1.2.0 create_cgreen_value@Base 1.2.0 create_cgreen_vector@Base 1.2.0 @@ -71,13 +72,18 @@ create_equal_to_value_constraint@Base 1.2.0 
create_greater_than_double_constraint@Base 1.2.0 create_greater_than_value_constraint@Base 1.2.0 + create_is_false_constraint@Base 1.4.1 + create_is_null_constraint@Base 1.4.1 + create_is_true_constraint@Base 1.4.1 create_less_than_double_constraint@Base 1.2.0 create_less_than_value_constraint@Base 1.2.0 + create_libxml_reporter@Base 1.6.1 create_named_test_suite_@Base 1.2.0 create_not_equal_to_contents_constraint@Base 1.2.0 create_not_equal_to_double_constraint@Base 1.2.0 create_not_equal_to_string_constraint@Base 1.2.0 create_not_equal_to_value_constraint@Base 1.2.0 + create_not_null_constraint@Base 1.4.1 create_reporter@Base 1.2.0 create_return_by_value_constraint@Base 1.2.0 create_return_double_value_constraint@Base 1.2.0 @@ -88,8 +94,10 @@ create_vector_of_names@Base 1.2.0 create_with_side_effect_constraint@Base 1.2.0 create_xml_reporter@Base 1.2.0 + current_test@Base 1.6.3 defaultContext@Base 1.2.0 destroy_breadcrumb@Base 1.2.0 + destroy_by_value_constraint@Base 1.4.1 destroy_cgreen_value@Base 1.2.0 destroy_cgreen_vector@Base 1.2.0 destroy_constraint@Base 1.2.0 @@ -122,14 +130,10 @@ is_content_comparing@Base 1.2.0 is_content_setting@Base 1.2.0 is_double_comparing@Base 1.2.0 - is_false@Base 1.2.0 - is_non_null@Base 1.2.0 is_not_comparing@Base 1.2.0 is_not_content_setting@Base 1.2.0 - is_null@Base 1.2.0 is_parameter@Base 1.2.0 is_string_comparing@Base 1.2.0 - is_true@Base 1.2.0 make_cgreen_by_value@Base 1.2.0 make_cgreen_double_value@Base 1.2.0 make_cgreen_integer_value@Base 1.2.0 @@ -164,6 +168,7 @@ set_cdash_reporter_vprinter@Base 1.2.0 set_cute_reporter_printer@Base 1.2.0 set_cute_reporter_vprinter@Base 1.2.0 + set_libxml_reporter_printer@Base 1.6.1 set_reporter_options@Base 1.2.0 set_setup@Base 1.2.0 set_teardown@Base 1.2.0 @@ -174,10 +179,6 @@ show_null_as_the_string_null@Base 1.2.0 significant_figures_for_assert_double_are@Base 1.2.0 start_cgreen_messaging@Base 1.2.0 - static_is_false_constraint@Base 1.2.0 - static_is_non_null_constraint@Base 1.2.0 - 
static_is_null_constraint@Base 1.2.0 - static_is_true_constraint@Base 1.2.0 string_contains@Base 1.2.0 string_dup@Base 1.2.0 strings_are_equal@Base 1.2.0 diff -Nru cgreen-1.3.0/debian/patches/fix-endian-issue-of-tests.patch cgreen-1.6.3/debian/patches/fix-endian-issue-of-tests.patch --- cgreen-1.3.0/debian/patches/fix-endian-issue-of-tests.patch 2020-11-12 05:20:42.000000000 +0000 +++ cgreen-1.6.3/debian/patches/fix-endian-issue-of-tests.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -Fix Endian issue on big endian arch. -Forwarded: https://github.com/cgreen-devs/cgreen/pull/241 ---- a/tests/constraint_messages_tests.c -+++ b/tests/constraint_messages_tests.c -@@ -39,7 +39,7 @@ - Ensure(ConstraintMessage, for_is_equal_to_hex) { - unsigned char bytes[4]; - memset(bytes, 0xaa, sizeof(bytes)); -- assert_that(bytes[0], is_equal_to_hex(0xbb)); -+ assert_that((unsigned char) bytes[0], is_equal_to_hex(0xbb)); - } - - Ensure(ConstraintMessage, for_is_not_equal_to) { -@@ -60,8 +60,8 @@ - - // Contents of struct/memory - Ensure(ConstraintMessage, for_is_equal_to_contents_of) { -- int forty_five[45] = {45, 44, 43}, thirty_three[33] = {45, 44, 33}; -- assert_that(thirty_three, is_equal_to_contents_of(forty_five, 55)); -+ char forty_five[45] = {45, 44, 43}, thirty_three[33] = {45, 44, 33}; -+ assert_that(thirty_three, is_equal_to_contents_of(forty_five, 45)); - } - - Ensure(ConstraintMessage, for_is_not_equal_to_contents_of) { ---- a/tests/constraint_messages_tests.expected -+++ b/tests/constraint_messages_tests.expected -@@ -52,7 +52,7 @@ - - constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_contents_of - Expected [thirty_three] to [equal contents of] [forty_five] -- at offset: [8] -+ at offset: [2] - actual value: [0x21] - expected value: [0x2b] - -@@ -62,7 +62,7 @@ - expected value: [3.300000] - - constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_hex -- Expected [bytes[0]] to [equal] [0xbb] -+ Expected [(unsigned 
char) bytes[0]] to [equal] [0xbb] - actual value: [0xaa] - expected value: [0xbb] - ---- a/tests/custom_constraint_messages_tests.c -+++ b/tests/custom_constraint_messages_tests.c -@@ -82,12 +82,12 @@ - - */ - typedef struct Box { -- int id; -+ char id; - int size; - } Box; - - typedef struct Piece { -- int id; -+ char id; - int size; - } Piece; - -@@ -127,7 +127,7 @@ - #define can_fit_in_box(box) create_piece_fit_in_box_constraint((intptr_t)box, #box) - - Ensure(CustomConstraint, more_complex_custom_constraint_function) { -- Box box1 = {.id = 1, .size = 5}; -- Piece piece99 = {.id = 99, .size = 6}; -+ Box box1 = {.id = (char)1, .size = 5}; -+ Piece piece99 = {.id = (char)99, .size = 6}; - assert_that(&piece99, can_fit_in_box(&box1)); - } diff -Nru cgreen-1.3.0/debian/patches/fix-file-references-package-build-path.patch cgreen-1.6.3/debian/patches/fix-file-references-package-build-path.patch --- cgreen-1.3.0/debian/patches/fix-file-references-package-build-path.patch 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/patches/fix-file-references-package-build-path.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -fix file references package build path ---- a/include/cgreen/suite.h -+++ b/include/cgreen/suite.h -@@ -16,8 +16,8 @@ - extern "C" { - #endif - --#define create_test_suite() create_named_test_suite_(__func__, __FILE__, __LINE__) --#define create_named_test_suite(name) create_named_test_suite_(name, __FILE__, __LINE__) -+#define create_test_suite() create_named_test_suite_(__func__, "", __LINE__) -+#define create_named_test_suite(name) create_named_test_suite_(name, "", __LINE__) - #define add_test(suite, test) add_test_(suite, STRINGIFY_TOKEN(test), &spec_name(default, test)) - #define add_test_with_context(suite, context, test) add_test_(suite, STRINGIFY_TOKEN(test), &spec_name(context, test)) - #define add_tests(suite, ...) 
add_tests_(suite, #__VA_ARGS__, (CgreenTest *)__VA_ARGS__) ---- a/src/suite.c -+++ b/src/suite.c -@@ -13,7 +13,7 @@ - - CgreenContext defaultContext = { - /* name */ "", -- /* filename */ __FILE__, -+ /* filename */ "", - /* setup */ &do_nothing, - /* teardown */ &do_nothing - }; ---- a/src/utils.h -+++ b/src/utils.h -@@ -10,7 +10,7 @@ - - extern bool panic_use_colours; - --#define PANIC(...) panic(__FILE__, __LINE__, __VA_ARGS__) -+#define PANIC(...) panic(__FUNCTION__, __LINE__, __VA_ARGS__) - extern char *string_dup(const char *original); - extern void panic_set_output_buffer(const char *buffer); - extern void panic(const char *filename, int line, const char *fmt, ...); diff -Nru cgreen-1.3.0/debian/patches/fix-wrong-path-for-interpreter.patch cgreen-1.6.3/debian/patches/fix-wrong-path-for-interpreter.patch --- cgreen-1.3.0/debian/patches/fix-wrong-path-for-interpreter.patch 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/patches/fix-wrong-path-for-interpreter.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -fix wrong path for interpreter -Index: cgreen1/tools/cgreen-debug -=================================================================== ---- cgreen1.orig/tools/cgreen-debug -+++ cgreen1/tools/cgreen-debug -@@ -1,4 +1,4 @@ --#!/usr/bin/bash -+#!/bin/bash - # cgreen-debug - # - # Script to start cgreen-runner under gdb, load a library and break diff -Nru cgreen-1.3.0/debian/patches/series cgreen-1.6.3/debian/patches/series --- cgreen-1.3.0/debian/patches/series 2020-11-12 05:20:42.000000000 +0000 +++ cgreen-1.6.3/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -fix-endian-issue-of-tests.patch -fix-file-references-package-build-path.patch -fix-wrong-path-for-interpreter.patch diff -Nru cgreen-1.3.0/debian/rules cgreen-1.6.3/debian/rules --- cgreen-1.3.0/debian/rules 2020-11-12 05:20:42.000000000 +0000 +++ cgreen-1.6.3/debian/rules 2023-10-18 22:04:14.000000000 +0000 @@ -12,7 +12,10 @@ dh $@ override_dh_auto_test: 
- LD_LIBRARY_PATH="$(BUILD)/src/" dh_auto_test + if ! LD_LIBRARY_PATH="$(BUILD)/src/" dh_auto_test; then \ + cat $(BUILD)/Testing/Temporary/LastTest.log; \ + exit 1; \ + fi override_dh_auto_configure: [ -d $(BUILD) ] || mkdir $(BUILD) diff -Nru cgreen-1.3.0/debian/tests/control cgreen-1.6.3/debian/tests/control --- cgreen-1.3.0/debian/tests/control 2020-08-14 07:32:34.000000000 +0000 +++ cgreen-1.6.3/debian/tests/control 2023-10-18 22:04:14.000000000 +0000 @@ -1,3 +1,3 @@ Tests: testsuite -Restrictions: allow-stderr +Restrictions: allow-stderr, superficial Depends: @ diff -Nru cgreen-1.3.0/debian-control cgreen-1.6.3/debian-control --- cgreen-1.3.0/debian-control 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/debian-control 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -Source: cgreen -Section: devel -Priority: optional -Maintainer: Thomas Nilefalk -Build-Depends: debhelper (>= 10), cmake -Standards-Version: 4.0.0 -Homepage: https://github.com/cgreen-devs/cgreen -Package: cgreen -Version: 1.3.0 -Architecture: amd64 -Description: Unit tests and mocking framework for C and C++ - A modern unit test and mocking framework for C and C++. 
Cgreen features - - fast build, clean code, highly portable - - simple auto-discovery of tests - - fluent, expressive and readable API - - each test runs in isolation to prevent cross-test dependencies - - built-in mocking for C, compatible other C++ mocking libraries Binary files /tmp/tmpe868a3ee/1nFRutB76r/cgreen-1.3.0/doc/avtar.png and /tmp/tmpe868a3ee/tEA2Un2PZU/cgreen-1.6.3/doc/avtar.png differ diff -Nru cgreen-1.3.0/doc/cgreen-guide-en.asciidoc cgreen-1.6.3/doc/cgreen-guide-en.asciidoc --- cgreen-1.3.0/doc/cgreen-guide-en.asciidoc 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/cgreen-guide-en.asciidoc 2023-10-03 15:16:52.000000000 +0000 @@ -1,9 +1,14 @@ +////////////////////////////////////////////////////////////////////// +In this Asciidoc document we use the convention that one sentence is kept on a single line. +This creates nice diffs. +////////////////////////////////////////////////////////////////////// :source-highlighter: highlightjs :icons: font :numbered: :toc: left :pp: ++ - +:title-page: +:title-logo-image: logo.png ifdef::env-github[] :tip-caption: :bulb: :note-caption: :information_source: @@ -13,29 +18,24 @@ endif::[] = Cgreen : Unit Tests, Stubbing and Mocking for C and C++ -v{VERSION} +v{VERSION} - Generated {localdate} == Cgreen Quickstart Guide === What is Cgreen? -*Cgreen* is a unit tester for the C and C++ software developer, a test -automation and software quality assurance tool for programmers and -development teams. The tool is completely open source published under -the https://github.com/cgreen-devs/cgreen/blob/master/LICENSE[ISC, -OpenBSD, license]. - -Unit testing is a development practice popularised by the agile -development community. It is characterised by writing many small -tests alongside the normal code. Often the tests are written before -the code they are testing, in a tight test-code-refactor loop. Done -this way, the practice is known as Test Driven Development. 
*Cgreen* -was designed specifically to support this style of development. - -Unit tests are written in the same language as the code, in our case -C or C++. This avoids the mental overhead of constantly switching language, -and also allows you to use any application code in your tests. +*Cgreen* is a unit testing framework for the C and C++ software developer, a test automation and software quality assurance tool for programmers and development teams. +The tool is completely open source published under the https://github.com/cgreen-devs/cgreen/blob/master/LICENSE[ISC, OpenBSD, license]. + +Unit testing is a development practice popularised by the agile development community. +It is characterised by writing many small tests alongside the normal code. +Often the tests are written before the code they are testing, in a tight test-code-refactor loop. +Done this way, the practice is known as Test Driven Development. + *Cgreen* was designed specifically to support this style of development. + +Unit tests are written in the same language as the code, in our case C or C++. +This avoids the mental overhead of constantly switching language, and also allows you to use any application code in your tests. Here are some of its features: @@ -52,37 +52,25 @@ - Fully composable test suites - A single test can be run in a single process for easier debugging -*Cgreen* also supports the classic xUnit-style assertions for easy -porting from other frameworks. +*Cgreen* also supports the classic xUnit-style assertions for easy porting from other frameworks. -*Cgreen* was initially developed to support C programming, but there -is also support for C{pp}. It was initially a spinoff from a research -project at Wordtracker and created by Marcus Baker. Significant -additions by Matt Hargett and continuous nurturing by Thomas Nilefalk -has made *Cgreen* what it is today. +*Cgreen* was initially developed to support C programming, but there is also support for C{pp}. 
+It was initially a spinoff from a research project at Wordtracker and created by Marcus Baker. +Significant additions by Matt Hargett and continuous nurturing by Thomas Nilefalk has made *Cgreen* what it is today. === Cgreen - Vanilla or Chocolate? -Test driven development (TDD) really catched on when the JUnit -framework for Java spread to other langauges, giving us a family of -https://en.wikipedia.org/wiki/XUnit[xUnit] tools. *Cgreen* was born in -this wave and have many similarities to the xUnit family. - -But TDD evolved over time and modern thinking and practice is more -along the lines of BDD, an acronym for Behaviour Driven Development, -made popular by people like Dan North and frameworks like JBehave, -RSpec, Cucumber and Jasmine. - -*Cgreen* follows this trend and has evolved to embrace a BDD-flavoured -style of testing. Although the fundamental mechanisms in TDD and -'technical' BDD are much the same, the shift in focus by changing -wording from 'tests' to 'behaviour specifications' is very -significant. - -This document will present *Cgreen* using the more modern and better -BDD-style. In a later section you can have a peek at the classic TDD -API, but you should consider that as outdated. +Test driven development (TDD) really catched on when the JUnit framework for Java spread to other langauges, giving us a family of https://en.wikipedia.org/wiki/XUnit[xUnit] tools. +*Cgreen* was born in this wave and have many similarities to the xUnit family. + +But TDD evolved over time and modern thinking and practice is more along the lines of BDD, an acronym for Behaviour Driven Development, made popular by people like Dan North and frameworks like JBehave, RSpec, Cucumber and Jasmine. + +*Cgreen* follows this trend and has evolved to embrace a BDD-flavoured style of testing. +Although the fundamental mechanisms in TDD and 'technical' BDD are much the same, the shift in focus by changing wording from 'tests' to 'behaviour specifications' is very significant. 
+ +This document will present *Cgreen* using the more modern and better BDD-inspired style. +In a later section you can have a peek at the classic xUnit-family TDD API, but you should consider that as outdated. === Installing Cgreen @@ -91,27 +79,22 @@ ==== Installing a package -NOTE: At this point there are few supported pre-built packages -available. For now you'll probably have to build from source. +The first way is to use packages provided by the *Cgreen* Team and porters for the various operating systems. +If your system uses a package manager ('apt', 'yum', 'brew' and so on) there might be a prebuilt package that you can just install using your systems package manager. -The first way is to use packages provided by the *Cgreen* Team. If -your system uses a package manager ('apt' or 'port' and so on) there -might be a prebuilt package that you can just install using your -systems package manager. - -If no *Cgreen* package is distributed for your system you can download -a package from https://github.com/cgreen-devs/cgreen/releases[Cgreen -GitHub project]. Install it using the normal procedures for your -system. +If no *Cgreen* package is distributed for your system you can download a package from https://github.com/cgreen-devs/cgreen/releases[Cgreen GitHub project]. +Install it using the normal procedures for your system. + +NOTE: At this point there are pre-built packages available for quite a few environments. +They are not all using the latest version, though. +If you need that, you can still build from source. ==== Installing from source -The second way is available for developers and advanced -users. Basically this consists of fetching the sources of the project -on https://github.com/cgreen-devs/cgreen[GitHub], just click on -"Download ZIP", and compiling them. To do this you need the -http://www.cmake.org[CMake] build system. +A second way is available for developers and advanced users. 
+Basically this consists of fetching the sources of the project on https://github.com/cgreen-devs/cgreen[GitHub], just click on "Download ZIP", and then compile them. +To do this you need the http://www.cmake.org[CMake] build system. Once you have the CMake tool installed, the steps are: @@ -123,62 +106,46 @@ $ make install ----------------------------------------- -The initial `make` command will configure the build process and -create a separate `build` directory before going there and building -using *CMake*. This is called an 'out of source build'. It compiles -*Cgreen* from outside the sources directory. This helps the overall -file organization and enables multi-target builds from the same -sources by leaving the complete source tree untouched. - -TIP: Experienced users may tweak the build configuration by going to -the build subdirectory and use `ccmake ..` to modify the build -configuration in that subtree. - -TIP: The Makefile is just there for convenience, it just creates the -build directory and invokes *CMake* there, so that you don't have -to. This means that experienced *CMake* users can just do as they -normally do with a *CMake*-based project instead of invoking `make`. - -The build process will create a library (on unix called -`libcgreen.so`) which can be used in conjunction with the `cgreen.h` -header file to compile and link your test code. The created library is -installed in the system, by default in the `/usr/local/lib/`. +The initial `make` command will configure the build process and create a separate `build` directory before going there and building using *CMake*. +This is called an 'out of source build'. +It compiles *Cgreen* from outside the sources directory. +This helps the overall file organization and enables multi-target builds from the same sources by leaving the complete source tree untouched. 
+ +TIP: Experienced users may tweak the build configuration by going to the build subdirectory and use `ccmake ..` to modify the build configuration in that subtree. + +TIP: The Makefile is just there for convenience, it creates the build directory and invokes *CMake* there, so that you don't have to. +This means that experienced *CMake* users can just do as they normally do with a *CMake*-based project instead of invoking `make`. + +The build process will create a library (on unix called `libcgreen.so`) which can be used in conjunction with the `cgreen.h` header file to compile and link your test code. +The created library is installed in the system directories, by default in `/usr/local/lib/`. ==== Your First Test -We will start demonstrating the use of *Cgreen* by writing some tests for *Cgreen* itself -to confirm that everything is working as it should. Let's start with a -simple test module with no tests, called `first_test.c`... +We will start demonstrating the use of *Cgreen* by writing some tests for *Cgreen* itself to confirm that everything is working as it should. +Let's start with a simple test module with no tests, called `first_test.c`... [source,c] --------------------------------------- include::tutorial_src/first_tests0.c[] --------------------------------------- -This is very unexciting. It just creates an empty test suite and runs -it. It's usually easier to proceed in small steps, and this is the -smallest one I could think of. The only complication is the `cgreen.h` -header file and the mysterious looking "declarations" at the beginning -of the file. +This is very unexciting. +It just creates an empty test suite and runs it. +It's usually easier to proceed in small steps, and this is the smallest one I could think of. +The only complication is the `cgreen.h` header file and the mysterious looking "declarations" at the beginning of the file. The BDD flavoured *Cgreen* notation calls for a System Under Test -(SUT), or a 'context'. 
The declarations give a context to the tests -and it also makes it more natural to talk about which module or class, -the system under test, is actually responsible for the functionality -we are expressing. In one way we are 'describing', or spec'ing, the -functionality of the SUT. That's what the `Describe();` does. And for -technical reasons (actually requirements of the C language), you must -declare the `BeforeEach()` and `AfterEach()` functions even if they -are empty. (You will get strange errors if you don't!) - -NOTE: We are using the name "Cgreen" as the SUT in these first -examples, as *Cgreen* itself is the object or class we want to -test or describe. - -I am assuming you have the *Cgreen* folder in the include search -path to ensure compilation works, otherwise you'll need to add that in -the compilation command. +(SUT), or a 'context'. +The declarations give a context to the tests and it also makes it more natural to talk about which module or class, the system under test, is actually responsible for the functionality we are describing. +In one way we are 'describing', or spec'ing, the functionality of the SUT. +That's what the `Describe();` does. +And for technical reasons (actually requirements of the C language), you must declare the `BeforeEach()` and `AfterEach()` functions even if they are empty. +(You will get strange errors if you don't!) + +NOTE: We are using the name "Cgreen" as the SUT in these first examples, as *Cgreen* itself is the object or class we want to test or describe. + +I am assuming you have the *Cgreen* folder in the include search path to ensure compilation works, otherwise you'll need to add that in the compilation command. Then, building this test is, of course, trivial... @@ -194,25 +161,22 @@ include::tutorial_src/first0.out[] ----------------------------- -All of the above rather assumes you are working in a Unix like -environment, probably with 'gcc'. 
The code is pretty much standard -C99, so any C compiler should work. *Cgreen* should compile on all -systems that support the `sys/msg.h` messaging library. It has been -tested on Linux, MacOSX, Cygwin and Windows. - -So far we have tried compilation, and shown that the test suite -actually runs. Let's add a meaningless test or two so that you can -see how it runs... +All of the above rather assumes you are working in a Unix like environment, probably with 'gcc'. +The code is pretty much standard C99, so any C compiler should work. +*Cgreen* should compile on all systems that support the `sys/msg.h` messaging library. +It has been tested on Linux, MacOSX, Cygwin. +If you are on Windows we would be glad if you could figure out how to build there. + +So far we have tried compilation, and shown that the test suite actually runs. +Let's add a meaningless test or two so that you can see how it runs... [source,c] ----------------------------- include::tutorial_src/first_tests1.c[] ----------------------------- -A test is denoted by the macro `Ensure` which takes an optional -context (`Cgreen`) and a, hopefully descriptive, testname -(`passes_this_test`). You add the test to your suite using -`add_test_with_context()`. +A test is denoted by the macro `Ensure` which takes an optional context (`Cgreen`) and a, hopefully descriptive, testname (`passes_this_test`). +You add the test to your suite using `add_test_with_context()`. On compiling and running, we now get the output... @@ -220,23 +184,19 @@ include::tutorial_src/first1.out[] ----------------------------- -The `TextReporter`, created by the call to `create_text_reporter()`, is -the easiest way to output the test results. It prints the failures as -intelligent and expressive text messages on your console. - -Of course "0" would never equal "1", but this shows that *Cgreen* -presents the value you expect (`[be true]`) __and__ the expression -that you want to assert (`[0 == 1]`). 
We can also see a handy short -form for asserting boolean expressions (`assert_that(0 == 1);`). +The `TextReporter`, created by the call to `create_text_reporter()`, is the easiest way to output the test results. +It prints the failures as intelligent and expressive text messages on your console. + +Of course "0" would never equal "1", but this shows that *Cgreen* presents the value you expect (`[be true]`) __and__ the expression that you want to assert (`[0 == 1]`). +We can also see a handy short form for asserting boolean expressions (`assert_that(0 == 1);`). [[tdd_with_cgreen]] === Five Minutes Doing TDD with Cgreen -For a more realistic example we need something to test. We'll pretend -that we are writing a function to split the words of a sentence in -place. It would do this by replacing any spaces with string -terminators and returns the number of conversions plus one. Here is -an example of what we have in mind... +For a more realistic example we need something to test. +We'll pretend that we are writing a function to split the words of a sentence in place. +It would do this by replacing any spaces with string terminators and returns the number of conversions plus one. +Here is an example of what we have in mind... [source,c] ------------------------------- @@ -244,14 +204,12 @@ word_count = split_words(sentence); ------------------------------- -The variable `sentence` should now point at -"Just\0the\0first\0test". Not an obviously useful function, but we'll -be using it for something more practical later. - -This time around we'll add a little more structure to our -tests. Rather than having the test as a stand alone program, we'll -separate the runner from the test cases. That way, multiple test -suites of test cases can be included in the `main()` runner file. +The variable `sentence` should now point at "Just\0the\0first\0test". +Not an obviously useful function, but we'll be using it for something more practical later. 
+ +This time around we'll add a little more structure to our tests. +Rather than having the test as a stand alone program, we'll separate the runner from the test cases. +That way, multiple test suites of test cases can be included in the `main()` runner file. This makes it less work to add more tests later. Here is the, so far empty, test case in `words_test.c`... @@ -268,13 +226,11 @@ include::tutorial_src/all_tests.c[] ------------------------------- -*Cgreen* has two ways of running tests. The default is to run all -tests in their own protected processes. This is what happens if you -invoke `run_test_suite()`. All tests are then completely independent -since they run in separate processes, preventing a single run-away -test from bringing the whole program down with it. It also ensures -that one test cannot leave any state to the next, thus forcing you to -setup the prerequisites for each test correctly and clearly. +*Cgreen* has two ways of running tests. +The default is to run all tests in their own protected processes. +This is what happens if you invoke `run_test_suite()`. +All tests are then completely independent since they run in separate processes, preventing a single run-away test from bringing the whole program down with it. +It also ensures that one test cannot leave any state to the next, thus forcing you to setup the prerequisites for each test correctly and clearly. Building this scaffolding... @@ -290,11 +246,9 @@ include::tutorial_src/words0.out[] ------------------------------- -Note that we get an extra level of output here, we have both `main` -and `words_tests`. That's because `all_tests.c` adds the words test -suite to its own (named `main` since it was created in the function -`main()`). All this scaffolding is pure overhead, but from now on -adding tests will be a lot easier. +Note that we get an extra level of output here, we have both `main` and `words_tests`. 
+That's because `all_tests.c` adds the words test suite to its own (named `main` since it was created in the function `main()`). +All this scaffolding is pure overhead, but from now on adding tests will be a lot easier. Here is a first test for `split_words()` in `words_test.c`... @@ -303,10 +257,10 @@ include::tutorial_src/words_tests1.c[] ------------------------------- -The `assert_that()` macro takes two parameters, the value to assert -and a constraint. The constraints comes in various forms. In this case -we use the probably most common, `is_equal_to()`. With the default -`TextReporter` the message is sent to `STDOUT`. +The `assert_that()` macro takes two parameters, the value to assert and a constraint. +The constraints comes in various forms. +In this case we use the probably most common, `is_equal_to()`. +With the default `TextReporter` the message is sent to `STDOUT`. To get this to compile we need to create the `words.h` header file... @@ -338,17 +292,13 @@ include::tutorial_src/words1.out[] ------------------------------- -The breadcrumb trail following the "Failure" text is the nesting of -the tests. It goes from the test suites, which can be nested in each -other, through the test function, and finally to the message from the -assertion. In the language of *Cgreen*, a "failure" is a mismatched -assertion, or constraint, and an "exception" occurs when a test fails -to complete for any reason, e.g. a segmentation fault. - -We could get this to pass just by returning the value 4. Doing TDD in -really small steps, you would actually do this, but we're not teaching -TDD here. Instead we'll go straight to the core of the -implementation... +The breadcrumb trail following the "Failure" text is the nesting of the tests. +It goes from the test suites, which can be nested in each other, through the test function, and finally to the message from the assertion. 
+In the language of *Cgreen*, a "failure" is a mismatched assertion, or constraint, and an "exception" occurs when a test fails to complete for any reason, e.g. a segmentation fault. + +We could get this to pass just by returning the value 4. +Doing TDD in really small steps, you would actually do this, but we're not teaching TDD here. +Instead we'll go straight to the core of the implementation... [source,c] -------------------------------- @@ -361,11 +311,10 @@ include::tutorial_src/words2.out[] --------------------------------- -There is actually a hidden problem here, but our tests still passed so -we'll pretend we didn't notice. +There is actually a hidden problem here, but our tests still passed so we'll pretend we didn't notice. -So it's time to add another test. We want to confirm that the string -is broken into separate words... +So it's time to add another test. +We want to confirm that the string is broken into separate words... [source,c] --------------------------------- @@ -396,8 +345,8 @@ include::tutorial_src/words4.out[] ---------------------------------- -Our earlier test now fails, because we have affected the `strlen()` -call in our loop. Moving the length calculation out of the loop... +Our earlier test now fails, because we have affected the `strlen()` call in our loop. +Moving the length calculation out of the loop... [source,c] ---------------------------------- @@ -412,18 +361,17 @@ include::tutorial_src/words5.out[] ---------------------------------- -It's nice to keep the code under control while we are actually writing -it, rather than debugging later when things are more complicated. +It's nice to keep the code under control while we are actually writing it, rather than debugging later when things are more complicated. -That was pretty straight forward. Let's do something more interesting. +That was pretty straight forward. +Let's do something more interesting. === What are Mock Functions? 
-The next example is a more realistic extension of our previous -attempts. As in real life we first implement something basic and then -we go for the functionality that we need. In this case a function that -invokes a callback for each word found in a sentence. Something -like... +The next example is a more realistic extension of our previous attempts. +As in real life we first implement something basic and then we go for the functionality that we need. +In this case a function that invokes a callback for each word found in a sentence. +Something like... [source,c] ---------------------------------- @@ -431,19 +379,17 @@ words("This is a sentence", &act_on_word, &memo); ---------------------------------- -Here the `memo` pointer is just some accumulated data that the -`act_on_word()` callback might work with. Other people will write the -`act_on_word()` function and probably many other functions like -it. The callback is actually a flex point, and not of interest right -now. - -The function under test is the `words()` function and we want to make -sure it walks the sentence correctly, dispatching individual words as -it goes. So what calls are made are very important. How to test this? - -Let's start with a one word sentence. In this case we would expect -the callback to be invoked once with the only word, right? Here is -the test for that... +Here the `memo` pointer is just some accumulated data that the `act_on_word()` callback might work with. +Other people will write the `act_on_word()` function and probably many other functions like it. +The callback is actually a flex point, and not of interest right now. + +The function under test is the `words()` function and we want to make sure it walks the sentence correctly, dispatching individual words as it goes. +So what calls are made are very important. +How do we go about to test this? + +Let's start with a one word sentence. +In this case we would expect the callback to be invoked once with the only word, right? 
+Here is the test for that... [source,c] --------------------------------- @@ -456,26 +402,21 @@ What is the funny looking `mock()` function? -A mock is basically a programmable object. In C objects are limited -to functions, so this is a mock function. The macro `mock()` compares -the incoming parameters with any expected values and dispatches -messages to the test suite if there is a mismatch. It also returns -any values that have been preprogrammed in the test. - -The test is `invokes_callback_once_for_single_word_sentence()`. It -programs the mock function using the `expect()` macro. It expects a -single call, and that single call should use the parameters `"Word"` -and `NULL`. If they don't match, we will get a test failure. - -So when the code under test (our `words()` function) calls the -injected `mocked_callback()` it in turn will call `mock()` with the -actual parameters. +A mock is basically a programmable object. +In C objects are limited to functions, so this is a mock function. +The macro `mock()` compares the incoming parameters with any expected values and dispatches messages to the test suite if there is a mismatch. +It also returns any values that have been preprogrammed in the test. + +The test is `invokes_callback_once_for_single_word_sentence()`. +It programs the mock function using the `expect()` macro. +It expects a single call, and that single call should use the parameters `"Word"` and `NULL`. +If they don't match, we will get a test failure. -Of course, we don't add the mock callback to the test suite, it's not -a test. +So when the code under test (our `words()` function) calls the injected `mocked_callback()` it in turn will call `mock()` with the actual parameters. -For a successful compile and link, the `words.h` file must now look -like... +Of course, we don't add the mock callback to the test suite, it's not a test. + +For a successful compile and link, the `words.h` file must now look like... 
[source,c] ---------------------------- @@ -495,46 +436,44 @@ include::tutorial_src/words6.out[] ---------------------------- -*Cgreen* reports that the callback was never invoked. We can easily -get the test to pass by filling out the implementation with... +*Cgreen* reports that the callback was never invoked. +We can easily get the test to pass by filling out the implementation with... [source,c] ---------------------------- include::tutorial_src/words6.c[lines=14..16] ---------------------------- -That is, we just invoke it once with the whole string. This is a -temporary measure to get us moving. For now everything should pass, -although it doesn't drive much functionality yet. +That is, we just invoke it once with the whole string. +This is a temporary measure to get us moving. +For now everything should pass, although it doesn't drive much functionality yet. ---------------------------- include::tutorial_src/words7.out[] ---------------------------- -That was all pretty conventional, but let's tackle the trickier case -of actually splitting the sentence. Here is the test function we will -add to `words_test.c`... +That was all pretty conventional, but let's tackle the trickier case of actually splitting the sentence. +Here is the test function we will add to `words_test.c`... [source,c] ---------------------------- include::tutorial_src/words_tests4.c[lines=37..43] ---------------------------- -Each call is expected in sequence. Any failures, or left-over or -extra calls, and we get failures. We can see all this when we run the -tests... +Each call is expected in sequence. +Any failures, or left-over or extra calls, and we get failures. +We can see all this when we run the tests... ---------------------------- include::tutorial_src/words8.out[] ---------------------------- -The first failure tells the story. Our little `words()` function -called the mock callback with the entire sentence. 
This makes sense, -because that was the hack we did to get to the next test. - -Although not relevant to this guide, I cannot resist getting these -tests to pass. Besides, we get to use the function we created -earlier... +The first failure tells the story. +Our little `words()` function called the mock callback with the entire sentence. +This makes sense, because that was the hack we did to get to the next test. + +Although not relevant to this guide, I cannot resist getting these tests to pass. +Besides, we get to use the function we created earlier... [source,c] ----------------------------- @@ -547,75 +486,61 @@ include::tutorial_src/words9.out[] ------------------------------ -More work than I like to admit as it took me three goes to get this -right. I firstly forgot the `+ 1` added on to `strlen()`, then forgot -to swap `sentence` for `word` in the `(*callback)()` call, and -finally third time lucky. Of course running the tests each time made -these mistakes very obvious. It's taken me far longer to write these -paragraphs than it has to write the code. +More work than I like to admit as it took me three goes to get this right. +I firstly forgot the `+ 1` added on to `strlen()`, then forgot to swap `sentence` for `word` in the `(*callback)()` call, and finally third time lucky. +Of course running the tests each time made these mistakes very obvious. +It's taken me far longer to write these paragraphs than it has to write the code. == Building Cgreen test suites -*Cgreen* is a tool for building unit tests in the C or C++ -languages. These are usually written alongside the production code by -the programmer to prevent bugs. Even though the test suites are -created by software developers, they are intended to be human -readable C code, as part of their function is an executable -specification. Used in this way, the test harness delivers constant -quality assurance. +*Cgreen* is a tool for building unit tests in the C or C++ languages. 
+These are usually written alongside the production code by the programmer to prevent bugs. +Even though the test suites are created by software developers, they are intended to be human readable C code, as part of their function is an executable specification. +Used in this way, the test harness delivers constant quality assurance. In other words you'll get less bugs. === Writing Basic Tests -*Cgreen* tests are like C, or C++, functions with no parameters and -no return value. To signal that they actually are tests we mark them -with the `Ensure` macro. Here's an example... +*Cgreen* tests are like C, or C++, functions with no parameters and no return value. +To signal that they actually are tests we mark them with the `Ensure` macro. +Here's an example... [source,c] ----------------------------- include::tutorial_src/strlen_tests1.c[lines=8..10] ----------------------------- -The `Ensure` macro takes two arguments (in the BDD style) where the -first is the System Under Test (SUT) which must be declared with the -`Describe` macro. +The `Ensure` macro takes two arguments (in the BDD style) where the first is the System Under Test (SUT) which must be declared with the `Describe` macro. [source,c] ----------------------------- include::tutorial_src/strlen_tests1.c[lines=4] ----------------------------- -The second argument is the test name and can be anything you want as -long as it fullfills the rules for an identifier in C and C++. A -typical way to choose the named of the tests is what we see here, -reading the declaration of the test makes sense since it is almost -plain english, "Ensure strlen returns five for 'hello'". No problem -understanding what we aim to test, or in TDD lingo, test drive. And -it can be viewed as an example from a description of what strlen -should be able to do. In a way, extracting all the `Ensure`:s from -your test might give you all the documentation you'll need. 
- -The call to `assert_that()` is the primary part of an assertion, -which is complemented with a constraint, in this case -`is_equal_to()`, as a parameter. This makes a very fluent interface -to the asserts, that actually reads like English. The general format -is then +The second argument is the test name and can be anything you want as long as it fullfills the rules for an identifier in C and C++. +A typical way to choose the named of the tests is what we see here, reading the declaration of the test makes sense since it is almost plain english, "Ensure strlen returns five for 'hello'". +No problem understanding what we aim to test, or in TDD lingo, test drive. +And it can be viewed as an example from a description of what strlen should be able to do. +In a way, extracting all the `Ensure`:s from your test might give you all the documentation you'll need. + +The call to `assert_that()` is the primary part of an assertion, which is complemented with a constraint, in this case +`is_equal_to()`, as a parameter. +This makes a very fluent interface to the asserts, that actually reads like English. +The general format is then [source, C] --------------- assert_that(actual, ); --------------- -NOTE: Sometimes you just want to fail the test explicitly, and there -is a function for that too, `fail_test(const char *message)`. And -there is a function to explicitly pass, `pass_test(void)`. +NOTE: Sometimes you just want to fail the test explicitly, and there is a function for that too, `fail_test(const char *message)`. +And there is a function to explicitly pass, `pass_test(void)`. -Assertions send messages to *Cgreen*, which in turn outputs the -results. +Assertions send messages to *Cgreen*, which in turn outputs the results. === The Standard Constraints @@ -668,17 +593,12 @@ | `is_greater_than_double(value)` | `> value` within the number of significant digits |========================================================= -The boolean assertion macros accept an `int` value. 
The equality -assertions accept anything that can be cast to `intptr_t` and simply -perform an `==` operation. The string comparisons are slightly -different in that they use the `` library function -`strcmp()`. If you use `is_equal_to()` with `char *` pointers then -it is the value of the pointers themselves that has to be the same, -i.e. the pointers have to point at the same string for the test to -pass. +The boolean assertion macros accept an `int` value. +The equality assertions accept anything that can be cast to `intptr_t` and simply perform an `==` operation. +The string comparisons are slightly different in that they use the `` library function `strcmp()`. +If you use `is_equal_to()` with `char *` pointers then it is the value of the pointers themselves that has to be the same, i.e. the pointers have to point at the same string for the test to pass. -The constraints above should be used as the second argument to one of the -assertion functions: +The constraints above should be used as the second argument to one of the assertion functions: |========================================================= |*Assertion* |*Description* @@ -691,11 +611,9 @@ |========================================================= -WARNING: You cannot use C/C++ string literal concatenation (like -`"don't" "use" "string" "concatenation"`) in the parameters to the -constraints. If you do, you will get weird error messages about missing -arguments to the constraint macros. This is caused by the macros using -argument strings to produce nice failure messages. +WARNING: You cannot use C/C++ string literal concatenation (like `"don't" "use" "string" "concatenation"`) in the parameters to the constraints. +If you do, you will get weird error messages about missing arguments to the constraint macros. +This is caused by the macros using argument strings to produce nice failure messages. === Asserting C++ Exceptions @@ -710,14 +628,12 @@ === BDD Style vs. 
TDD Style -So far we have encouraged the modern BDD style. It has merits that we -really want you to benefit from. But you might come across *Cgreen* -test in another style, more like the standard TDD style, which is -more inline with previous thinking and might be more similar to other -frameworks. +So far we have encouraged the modern BDD style. +It has merits that we really want you to benefit from. +But you might come across *Cgreen* test in another style, more like the standard TDD style, which is more inline with previous thinking and might be more similar to other frameworks. -The only difference, in principle, is the use of the SUT or -'context'. In the BDD style you have it, in the TDD style you don't. +The only difference, in principle, is the use of the SUT or 'context'. +In the BDD style you have it, in the TDD style you don't. [source,c] .BDD style: @@ -732,9 +648,8 @@ CAUTION: You can only have tests for a single SUT in the same source file. -If you use the older pure-TDD style you skip the `Describe` macro, the -`BeforeEach` and `AfterEach` functions. You don't need a SUT in the -`Ensure()` macro or when you add the test to the suite. +If you use the older pure-TDD style you skip the `Describe` macro, the `BeforeEach` and `AfterEach` functions. +You don't need a SUT in the `Ensure()` macro or when you add the test to the suite. [source,c] .TDD style: @@ -746,107 +661,37 @@ <3> No SUT/context in `add_test()` and you should use this function instead of `..with_context()`. -TIP: You might think of the TDD style as the BDD style with a default -SUT or context. - - -=== Legacy Style Assertions - -Cgreen have been around for a while, developed and matured. There is -an older style of assertions that was the initial version, a style -that we now call the 'legacy style', because it was more aligned with -the original, now older, unit test frameworks. If you are not interested -in historical artifacts, I recommend that you skip this section. 
- -But for completeness of documentation, here are the legacy style -assertion macros: - -|========================================================= -|*Assertion* |*Description* -| `assert_true(boolean)` | Passes if boolean evaluates true -| `assert_false(boolean)` | Fails if boolean evaluates true -| `assert_equal(first, second)` | Passes if 'first == second' -| `assert_not_equal(first, second)` | Passes if 'first != second' -| `assert_string_equal(char *, char *)` | Uses 'strcmp()' and passes if - the strings are equal -| `assert_string_not_equal(char *, char *)` | Uses 'strcmp()' and fails - if the strings are equal -|========================================================= - -Each assertion has a default message comparing the two values. If you -want to substitute your own failure messages, then you must use the -`*_with_message()` counterparts... - -|========================================================= -|*Assertion* -| `assert_true_with_message(boolean, message, ...)` -| `assert_false_with_message(boolean, message, ...)` -| `assert_equal_with_message(tried, expected, message, ...)` -| `assert_not_equal_with_message(tried, unexpected, message, ...)` -| `assert_string_equal_with_message(char *, char *, message, ...)` -| `assert_string_not_equal_with_message(char *, char *, message, ...)` -|========================================================= - -All these assertions have an additional `char *` message parameter, -which is the message you wished to display on failure. If this is set -to `NULL`, then the default message is shown instead. The most useful -assertion from this group is `assert_true_with_message()` as you can -use that to create your own assertion functions with your own -messages. - -Actually the assertion macros have variable argument lists. The -failure message acts like the template in `printf()`. We could change -the test above to be... 
- -[source,c] ------------------------------ -include::tutorial_src/strlen_tests4.c[lines=4..8] ------------------------------ - -This should produce a slightly more user friendly message when things -go wrong. But, actually, Cgreens default messages are so good that you -are encouraged to skip the legacy style and go for the more modern -constraints style assertions. Particularly in conjuction with the BDD -style test notation. - -IMPORTANT: We strongly recommend the use of BDD Style notation with -constraints based assertions. +TIP: You might think of the TDD style as the BDD style with a default SUT or context. === A Runner -The tests are only run by running a test suite in some form. (But see -also <>.) We can create and run one especially for this test like -so... +The tests are only run by running a test suite in some form. +(But see also <>.) +We can create and run one especially for this test like so... [source,c] ----------------------------- include::tutorial_src/strlen_tests5.c[lines=12..16] ----------------------------- -In case you have spotted that the reference to -`returns_five_for_hello` should have an ampersand in front of it, -`add_test_with_context()` is actually a macro. The `&` is added -automatically. Further more, the `Ensure()`-macro actually mangles the -tests name, so it is not actually a function name. (This might also -make them a bit difficult to find in the debugger....) +In case you have spotted that the reference to `returns_five_for_hello` should have an ampersand in front of it, `add_test_with_context()` is actually a macro. +The `&` is added automatically. +Further more, the `Ensure()`-macro actually mangles the tests name, so it is not actually a function name. +(This might also make them a bit difficult to find in the debugger....) -To run the test suite, we call `run_test_suite()` on it. So we can -just write... +To run the test suite, we call `run_test_suite()` on it. +So we can just write... 
[source,c] ----------------------------- include::tutorial_src/strlen_tests5.c[lines=19] ----------------------------- -The results of assertions are ultimately delivered as passes and -failures to a collection of callbacks defined in a `TestReporter` -structure. There is a predefined `TestReporter` in *Cgreen* called the -`TextReporter` that delivers messages in plain text like we have -already seen. +The results of assertions are ultimately delivered as passes and failures to a collection of callbacks defined in a `TestReporter` structure. +There is a predefined `TestReporter` in *Cgreen* called the `TextReporter` that delivers messages in plain text like we have already seen. -The return value of `run_test_suite()` is a standard C library/Unix -exit code that can be returned directly by the `main()` function. +The return value of `run_test_suite()` is a standard C library/Unix exit code that can be returned directly by the `main()` function. The complete test code now looks like... @@ -864,9 +709,9 @@ include::tutorial_src/strlen2.out[] ----------------------------- -We can see that the outer test suite is called `our_tests` since it -was in `our_tests()` we created the test suite. There are no messages -shown unless there are failures. So, let's break our test to see it... +We can see that the outer test suite is called `our_tests` since it was in `our_tests()` we created the test suite. +There are no messages shown unless there are failures. +So, let's break our test to see it... [source,c] ----------------------------- @@ -879,12 +724,10 @@ include::tutorial_src/strlen6.out[] ----------------------------- -*Cgreen* starts every message with the location of the test failure so -that the usual error message identifying tools (like Emacs's -`next-error`) will work out of the box. +*Cgreen* starts every message with the location of the test failure so that the usual error message identifying tools (like Emacs's `next-error`) will work out of the box. 
-Once we have a basic test scaffold up, it's pretty easy to add more -tests. Adding a test of `strlen()` with an empty string for example... +Once we have a basic test scaffold up, it's pretty easy to add more tests. +Adding a test of `strlen()` with an empty string for example... [source,c] ----------------------------- @@ -898,24 +741,20 @@ === BeforeEach and AfterEach -It's common for test suites to have a lot of duplicate code, -especially when setting up similar tests. Take this database code for -example... +It's common for test suites to have a lot of duplicate code, especially when setting up similar tests. +Take this database code for example... [source,c] ------------------------------ include::tutorial_src/schema_tests1.c[] ------------------------------ -We have already factored out the duplicate code into its own functions -`create_schema()` and `drop_schema()`, so things are not so bad. At -least not yet. But what happens when we get dozens of tests? For a -test subject as complicated as a database -http://www.martinfowler.com/eaaCatalog/activeRecord.html[ActiveRecord], -having dozens of tests is very likely. +We have already factored out the duplicate code into its own functions `create_schema()` and `drop_schema()`, so things are not so bad. +At least not yet. +But what happens when we get dozens of tests? +For a test subject as complicated as a database http://www.martinfowler.com/eaaCatalog/activeRecord.html[ActiveRecord], having dozens of tests is very likely. -We can get *Cgreen* to do some of the work for us by calling these -methods before and after each test in the test suite. +We can get *Cgreen* to do some of the work for us by calling these methods before and after each test in the test suite. Here is the new version... @@ -930,44 +769,34 @@ ... --------------------------- -With this new arrangement *Cgreen* runs the `create_schema()` function -before each test, and the `drop_schema()` function after each -test. 
This saves some repetitive typing and reduces the chance of -accidents. It also makes the tests more focused. - -The reason we try so hard to strip everything out of the test -functions is the fact that the test suite acts as documentation. In -our `person.h` example we can easily see that `Person` has some kind -of name property, and that this value must be unique. For the tests to -act like a readable specification we have to remove as much mechanical -clutter as we can. +With this new arrangement *Cgreen* runs the `create_schema()` function before each test, and the `drop_schema()` function after each test. +This saves some repetitive typing and reduces the chance of accidents. +It also makes the tests more focused. + +The reason we try so hard to strip everything out of the test functions is the fact that the test suite acts as documentation. +In our `person.h` example we can easily see that `Person` has some kind of name property, and that this value must be unique. +For the tests to act like a readable specification we have to remove as much mechanical clutter as we can. -In this particular case there are more lines that we could move from -the tests to `BeforeEach()`: +In this particular case there are more lines that we could move from the tests to `BeforeEach()`: [source,c] --------------------------- include::tutorial_src/schema_tests2.c[lines=25..26] --------------------------- -Of course that would require an extra variable, and it might make the -tests less clear. And as we add more tests, it might turn out to not -be common to all tests. This is a typical judgement call that you -often get to make with `BeforeEach()` and `AfterEach()`. - -NOTE: If you use the pure-TDD notation, not having the test subject -named by the `Describe` macro, you can't have the `BeforeEach()` and -`AfterEach()` either. In this case you can still run a function before -and after every test. 
Just nominate any `void(void)` function by -calling the function `set_setup()` and/or `set_teardown()` with the -suite and the function that you want to run before/after each test, -e.g. in the example above `set_setup(suite, create_schema);` and -`set_teardown(suite, drop_schema);`. - -A couple of details. There is only one `BeforeEach()` and one -`AfterEach()` allowed in each `TestSuite`. Also, the `AfterEach()` -function may not be run if the test crashes, causing some test -interference. This brings us nicely onto the next section... +Of course that would require an extra variable, and it might make the tests less clear. +And as we add more tests, it might turn out to not be common to all tests. +This is a typical judgement call that you often get to make with `BeforeEach()` and `AfterEach()`. + +NOTE: If you use the pure-TDD notation, not having the test subject named by the `Describe` macro, you can't have the `BeforeEach()` and `AfterEach()` either. +In this case you can still run a function before and after every test. +Just nominate any `void(void)` function by calling the function `set_setup()` and/or `set_teardown()` with the suite and the function that you want to run before/after each test. +In the example above that would be `set_setup(suite, create_schema);` and `set_teardown(suite, drop_schema);`. + +A couple of details. +There is only one `BeforeEach()` and one `AfterEach()` allowed in each `TestSuite`. +Also, the `AfterEach()` function might not be run if the test crashes, causing some test interference. +This brings us nicely onto the next section... === Each Test in its Own Process @@ -979,18 +808,15 @@ include::tutorial_src/crash_tests1.c[lines=8..11] ----------------------------- -Crashes are not something you would normally want to have in a test -run. Not least because it will stop you receiving the very test output -you need to tackle the problem. 
- -To prevent segmentation faults and other problems bringing down the -test suites, *Cgreen* runs every test in its own process. - -Just before calling the `BeforeEach()` (or `setup`) function, *Cgreen* -`fork()`:s. The main process waits for the test to complete normally -or die. This includes calling the `AfterEach()`(or `teardown`) -function, if any. If the test process dies, an exception is reported -and the main test process carries on with the next test. +Crashes are not something you would normally want to have in a test run. +Not least because it will stop you receiving the very test output you need to tackle the problem. + +To prevent segmentation faults and other problems bringing down the test suites, *Cgreen* runs every test in its own process. + +Just before calling the `BeforeEach()` (or `setup`) function, *Cgreen* `fork()`:s. +The main process waits for the test to complete normally or die. +This includes calling the `AfterEach()`(or `teardown`) function, if any. +If the test process dies, an exception is reported and the main test process carries on with the next test. For example... @@ -1005,30 +831,29 @@ include::tutorial_src/crash1.out[] ----------------------------- -The normal thing to do in this situation is to fire up the -debugger. Unfortunately, the constant `fork()`:ing of *Cgreen* can be -one extra complication too many when debugging. It's enough of a -problem to find the bug. - -To get around this, and also to allow the running of one test at a -time, *Cgreen* has the `run_single_test()` function. The signatures of -the two run methods are... +The normal thing to do in this situation is to fire up the debugger. +Unfortunately, the constant `fork()`:ing of *Cgreen* can be one extra complication too many when debugging. +It's enough of a problem to find the bug. + +To get around this, and also to allow the running of one test at a time, *Cgreen* has the `run_single_test()` function. +The signatures of the two run methods are... 
- `int run_test_suite(TestSuite *suite, TestReporter *reporter);` - `int run_single_test(TestSuite *suite, char *test, TestReporter *reporter);` -The extra parameter of `run_single_test()`, the `test` string, is the -name of the test to select. This could be any test, even in nested -test suites (see below). Here is how we would use it to debug our -crashing test... +The extra parameter of `run_single_test()`, the `test` string, is the name of the test to select. +This could be any test, even in nested test suites (see below). +Here is how we would use it to debug our crashing test... [source,c] ----------------------------- include::tutorial_src/crash_tests2.c[lines=13..17] ----------------------------- -When run in this way, *Cgreen* will not `fork()`. But see the section -on <>. +When run in this way, *Cgreen* will not `fork()`. +But see the section on <>. + +TIP: The function `run()` is a good place to place a breakpoint. The following is a typical session: @@ -1065,14 +890,11 @@ Which shows exactly where the problem is. -This deals with the case where your code throws an exception like -segmentation fault, but what about a process that fails to complete by -getting stuck in a loop? - -Well, *Cgreen* will wait forever too. But, using the C signal -handlers, we can place a time limit on the process by sending it an -interrupt. To save us writing this ourselves, *Cgreen* includes the -`die_in()` function to help us out. +This deals with the case where your code throws an exception like segmentation fault, but what about a process that fails to complete by getting stuck in a loop? + +Well, *Cgreen* will wait forever too. +But, using the C signal handlers, we can place a time limit on the process by sending it an interrupt. +To save us writing this ourselves, *Cgreen* includes the `die_in()` function to help us out. Here is an example of time limiting a test... 
@@ -1090,54 +912,40 @@ include::tutorial_src/crash3.out[] --------------------------- -Note that you see the test results as they come in. *Cgreen* streams the -results as they happen, making it easier to figure out where the test -suite has problems. - -Of course, if you want to set a general time limit on all your tests, -then you can add a `die_in()` to a `BeforeEach()` (or `setup()`) -function. *Cgreen* will then apply the limit to each of the tests in -that context, of course. - -Another possibility is the use of an environment variable named -`CGREEN_TIMEOUT_PER_TEST` which, if set to a number will apply that -timeout to every test run. This will apply to all tests in the same -run. +Note that you see the test results as they come in. +*Cgreen* streams the results as they happen, making it easier to figure out where the test suite has problems. + +Of course, if you want to set a general time limit on all your tests, then you can add a `die_in()` to a `BeforeEach()` (or `setup()`) function. +*Cgreen* will then apply the limit to each of the tests in that context, of course. + +Another possibility is the use of an environment variable named `CGREEN_TIMEOUT_PER_TEST` which, if set to a number will apply that timeout to every test run. +This will apply to all tests in the same run. [[debugging]] === Debugging *Cgreen* tests -*Cgreen* protects itself from being torn down by an exception in a -test by `fork()`-ing each test into a separate process. A catastrophic -error will then only affect the child process for that specific test -and *Cgreen* can catch it, rather than crashing too. It can then -report the exception and continue with the next test. +*Cgreen* protects itself from being torn down by an exception in a test by `fork()`-ing each test into a separate process. +A catastrophic error will then only affect the child process for that specific test and *Cgreen* can catch it, rather than crashing too. 
+It can then report the exception and continue with the next test. ==== No fork, please -If you want to debug any of your tests the constant `fork()`-ing -might make that difficult or impossible. There are also other -circumstances that might require that you don't use `fork()`. +If you want to debug any of your tests the constant `fork()`-ing might make that difficult or impossible. +There are also other circumstances that might require that you don't use `fork()`. There are two ways to make *Cgreen* refrain from `fork()`-ing. -*Cgreen* does not `fork()` when only a single test is run by name with -the function `run_single_test()`. To debug you can then obviously set -a breakpoint at that test (but note that its actual name probably have -been mangled). *Cgreen* does some book-keeping before actually getting -to the test, so a function easier to find might be the one simply -called `run()`. - -The second way is to define the environment variable -`CGREEN_NO_FORK`. If *Cgreen* can get that variable from the -environment using `getenv()` it will run the test(s) in the same -process. In this case the non-forking applies to *all* tests run, so -all test will run in the same process, namely *Cgreen*s main process. - -WARNING: This might bring your whole test suite down if a single test -causes an exception. So it is not a recommended setting for normal -use. +*Cgreen* does not `fork()` when only a single test is run by name with the function `run_single_test()`. +To debug you can then obviously set a breakpoint at that test (but note that its actual name probably have been mangled). +*Cgreen* does some book-keeping before actually getting to the test, so a function easier to find might be the one simply called `run()`. + +The second way is to define the environment variable `CGREEN_NO_FORK`. +If *Cgreen* can get that variable from the environment using `getenv()` it will run the test(s) in the same process. 
+In this case the non-forking applies to *all* tests run, so all test will run in the same process, namely *Cgreen*s main process. + +WARNING: This might bring your whole test suite down if a single test causes an exception. +So it is not a recommended setting for normal use. ==== Debugging with `cgreen-runner` @@ -1154,13 +962,11 @@ ==== `cgreen-debug` -For some platforms a utility script, `cgreen-debug`, is installed when -you install *Cgreen*. It makes it very convenient to start a debugging -session for a particular test. - -Find out the logical name of the test, which is composed of the -Context and the Testname, in the form :. Then just -invoke `cgreen-debug` +For some platforms a utility script, `cgreen-debug`, is installed when you install *Cgreen*. +It makes it very convenient to start a debugging session for a particular test. + +Find out the logical name of the test, which is composed of the Context and the Testname, in the form :. +Then just invoke `cgreen-debug` ---- $ cgreen-debug : @@ -1172,14 +978,15 @@ NOTE: Currently it only supports `gdb` and will prefer `cgdb` if that's available. + === Building Composite Test Suites -The `TestSuite` is a composite structure. This means test suites can -be added to test suites, building a tree structure that will be -executed in order. +The `TestSuite` is a composite structure. +This means test suites can be added to test suites, building a tree structure that will be executed in order. Let's combine the `strlen()` tests with the `Person` tests above. -Firstly we need to remove the `main()` functions. E.g... +Firstly we need to remove the `main()` functions. +E.g... [source,c] ---------------------------- @@ -1197,57 +1004,44 @@ include::tutorial_src/suite1.c[] ----------------------- -It's usually easier to place the `TestSuite` prototypes directly in -the runner source, rather than have lot's of header files. 
This is -the same reasoning that let us drop the prototypes for the test -functions in the actual test scripts. We can get away with this, -because the tests are more about documentation than encapsulation. - -As we saw above, we can run a single test using the -`run_single_test()` function, and we'd like to be able to do that from -the command line. So we added a simple `if` block to take the test -name as an optional argument. The entire test suite will be searched -for the named test. This trick also saves us a recompile when we -debug. - -When you use the BDD notation you can only have a single test subject -(which is actually equivalent of a suite) in a single file because you -can only have one `Describe()` macro in each file. But using this -strategy you can create composite suites that takes all your tests and -run them in one go. - -CAUTION: Rewrite pending. The next couple of sections does not reflect -the current best thinking. They are remnants of the TDD -notation. Using BDD notation you would create separate contexts, each -in its own file, with separate names, for each of the fixture cases. - -NOTE: If you use the TDD (non-BDD) notation you can build several test -suites in the same file, even nesting them. We can even add mixtures -of test functions and test suites to the same parent test suite. +It's usually easier to place the `TestSuite` prototypes directly in the runner source, rather than have lot's of header files. +This is the same reasoning that let us drop the prototypes for the test functions in the actual test scripts. +We can get away with this, because the tests are more about documentation than encapsulation. + +As we saw above, we can run a single test using the `run_single_test()` function, and we'd like to be able to do that from the command line. +So we added a simple `if` block to take the test name as an optional argument. +The entire test suite will be searched for the named test. 
+This trick also saves us a recompile when we debug. + +When you use the BDD notation you can only have a single test subject (which is actually equivalent of a suite) in a single file because you can only have one `Describe()` macro in each file. +But using this strategy you can create composite suites that takes all your tests and run them in one go. + +CAUTION: Rewrite pending. +The next couple of sections does not reflect the current best thinking. +They are remnants of the TDD notation. +Using BDD notation you would create separate contexts, each in its own file, with separate names, for each of the fixture cases. + +NOTE: If you use the TDD (non-BDD) notation you can build several test suites in the same file, even nesting them. +We can even add mixtures of test functions and test suites to the same parent test suite. Loops will give trouble, however. -NOTE: If we do place several suites in the same file, then all the suites -will be named the same in the breadcrumb trail in the test message. -They will all be named after the function the create call sits in. If -you want to get around this, or you just like to name your test -suites, you can use `create_named_test_suite()` instead of -`create_test_suite()`. This takes a single string parameter. In fact -`create_test_suite()` is just a macro that inserts the `__func__` -constant into `create_named_test_suite()`. - -What happens to `setup` and `teardown` functions in a `TestSuite` that -contains other `TestSuite`:s? - -Well firstly, *Cgreen* does not `fork()` when running a suite. It -leaves it up to the child suite to `fork()` the individual tests. -This means that a `setup` and `teardown` will run in the main -process. They will be run once for each child suite. - -We can use this to speed up our `Person` tests above. Remember we -were creating a new connection and closing it again in the fixtures. -This means opening and closing a lot of connections. 
At the slight
-risk of some test interference, we could reuse the connection accross
-tests...
+NOTE: If we do place several suites in the same file, then all the suites will be named the same in the breadcrumb trail in the test message.
+They will all be named after the function the create call sits in.
+If you want to get around this, or you just like to name your test suites, you can use `create_named_test_suite()` instead of `create_test_suite()`.
+This takes a single string parameter.
+In fact `create_test_suite()` is just a macro that inserts the `__func__` constant into `create_named_test_suite()`.
+
+What happens to `setup` and `teardown` functions in a `TestSuite` that contains other `TestSuite`:s?
+
+Well firstly, *Cgreen* does not `fork()` when running a suite.
+It leaves it up to the child suite to `fork()` the individual tests.
+This means that a `setup` and `teardown` will run in the main process.
+They will be run once for each child suite.
+
+We can use this to speed up our `Person` tests above.
+Remember we were creating a new connection and closing it again in the fixtures.
+This means opening and closing a lot of connections.
+At the slight risk of some test interference, we could reuse the connection across tests...

[source,c]
-----------------------
@@ -1289,22 +1083,18 @@
}
-----------------------

-The trick here is creating a test suite as a wrapper whose sole
-purpose is to wrap the main test suite in the fixture. This is our
-'fixture' pointer. This code is a little confusing, because we have
-two sets of fixtures in the same test script.
-
-We have the MySQL connection fixture. This will run
-`open_connection()` and `close_connection()` just once at the
-beginning and end of the person tests. This is because the `suite`
-pointer is the only member of `fixture`.
-
-We also have the schema fixture, the `create_schema()` and
-`drop_schema()`, which is run before and after every test. Those are
-still attached to the inner `suite`. 
+The trick here is creating a test suite as a wrapper whose sole purpose is to wrap the main test suite in the fixture. +This is our 'fixture' pointer. +This code is a little confusing, because we have two sets of fixtures in the same test script. + +We have the MySQL connection fixture. +This will run `open_connection()` and `close_connection()` just once at the beginning and end of the person tests. +This is because the `suite` pointer is the only member of `fixture`. -In the real world we would probably place the connection -fixture in its own file... +We also have the schema fixture, the `create_schema()` and `drop_schema()`, which is run before and after every test. +Those are still attached to the inner `suite`. + +In the real world we would probably place the connection fixture in its own file... [source,c] ----------------------- @@ -1337,24 +1127,20 @@ == Mocking functions with Cgreen -When testing you want certainty above all else. Random events destroy -confidence in your test suite and force needless extra runs "to be -sure". A good test places the system under test into a tightly -controlled environment. A test chamber if you like. This makes the -tests fast, repeatable and reliable. - -To create a test chamber for testing code, we have to control any -outgoing calls from the code under test. We won't believe our test -failure if our code is making calls to the internet for example. The -internet can fail all by itself. Not only do we not have total -control, but it also means we have to get dependent components working -before we can test the higher level code. This makes it difficult to -code top down. - -The solution to this dilemma is to write stub code for the components -whilst the higher level code is written. This pollutes the code base -with temporary code, and the test isolation disappears when the system -is eventually fleshed out. +When testing you want certainty above all else. 
+Random events destroy confidence in your test suite and force needless extra runs "to be sure". +A good test places the system under test into a tightly controlled environment. +A test chamber if you like. +This makes the tests fast, repeatable and reliable. + +To create a test chamber for testing code, we have to control any outgoing calls from the code under test. +We won't believe our test failure if our code is making calls to the internet for example. +The internet can fail all by itself. +Not only do we not have total control, but it also means we have to get dependent components working before we can test the higher level code. +This makes it difficult to code top down. + +The solution to this dilemma is to write stub code for the components whilst the higher level code is written. +This pollutes the code base with temporary code, and the test isolation disappears when the system is eventually fleshed out. The ideal is to have minimal stubs written for each individual test. *Cgreen* encourages this approach by making such tests easier to write. @@ -1369,59 +1155,61 @@ include::tutorial_src/read_paragraph1.c[lines=4..-1] ----------------------- -This is a fairly generic stream filter that turns the incoming -characters into C string paragraphs. Each call creates one paragraph, -returning a pointer to it or returning `NULL` if there is no -paragraph. The paragraph has memory allocated to it and the stream is -advanced ready for the next call. That's quite a bit of functionality, -and there are plenty of nasty boundary conditions. I really want this -code tested before I deploy it. - -The problem is the stream dependency. We could use a real stream, but -that will cause all sorts of headaches. It makes the test of our -paragraph formatter dependent on a working stream. It means we have -to write the stream first, bottom up coding rather than top down. It -means we will have to simulate stream failures - not easy. 
It will -also mean setting up external resources. This is more work, will run -slower, and could lead to spurious test failures. - -By contrast, we could write a simulation of the stream for each test, -called a "server stub". - -For example, when the stream is empty nothing should happen. We -hopefully get `NULL` from `read_paragraph` when the stream is -exhausted. That is, it just returns a steady stream of `EOF`s. +This is a fairly generic stream filter that turns the incoming characters into C string paragraphs. +Each call creates one paragraph, returning a pointer to it or returning `NULL` if there is no paragraph. +The paragraph has memory allocated to it and the stream is advanced ready for the next call. +That's quite a bit of functionality, and there are plenty of nasty boundary conditions. +I really want this code tested before I deploy it. + +The problem is the stream dependency. +We could use a real stream, but that will cause all sorts of headaches. +It makes the test of our paragraph formatter dependent on a working stream. +It means we have to write the stream first, bottom up coding rather than top down. +It means we will have to simulate stream failures - not easy. +It will also mean setting up external resources. +This is more work, will run slower, and could lead to spurious test failures. + +By contrast, we could write a simulation of the stream for each test, called a "server stub". + +For example, when the stream is empty nothing should happen. +We hopefully get `NULL` from `read_paragraph` when the stream is exhausted. +That is, it just returns a steady stream of `EOF`s. + +Fortunately, this function takes the stream as a parameter. +This is called dependency injection and is a very important concept. +Thanks to this we can write a small function, a stub, with the same signature, that simulates a real stream, and inject that instead of a real stream, which the production code probably does. 
+ +NOTE: If the code does not inject the dependency this way we can often compile the stub separately and link with that instead the real stream. +In this case your stub will have to have the same name as the original function, of course. +(This is sometimes called the linkage seam.) [source,c] ----------------------- include::tutorial_src/stream_tests0.c[lines=6..22] ----------------------- -Our simulation is easy here, because our fake stream returns only one -value. Things are harder when the function result changes from call -to call as a real stream would. Simulating this would mean messing -around with static variables and counters that are reset for each -test. And of course, we will be writing quite a few stubs. Often a -different one for each test. That's a lot of clutter. +Our simulation is easy here, because our fake stream returns only one value. +Things are harder when the function result changes from call to call as a real stream would. +Simulating this would mean messing around with static variables and counters that are reset for each test. +And of course, we will be writing quite a few stubs. +Often a different one for each test. +That's a lot of clutter. -*Cgreen* can handle this clutter for us by letting us write a single -programmable function for all our tests. +*Cgreen* can handle this clutter for us by letting us write a single programmable function for all our tests. === Record and Playback -We can redo our example by creating a `stream_stub()` function. We can -call it anything we want, and since I thought we wanted to have a -stubbed stream... +We can redo our example by creating a `stream_stub()` function. +We can call it anything we want, and since I thought we wanted to have a stubbed stream... 
[source,c]
-----------------------
include::tutorial_src/stream_tests1.c[lines=6..8]
-----------------------

-Hardly longer that our trivial server stub above, it is just a macro
-to generate a return value, but we can reuse this in test after
-test. Let's see how.
+Hardly longer than our trivial server stub above, it is just a macro to generate a return value, but we can reuse this in test after test.
+Let's see how.

For our simple example above we just tell it to always return `EOF`...

@@ -1430,11 +1218,8 @@
include::tutorial_src/stream_tests1.c[lines=1..17]
-----------------------

-<1> The `always_expect()` macro takes as arguments the function
-name and defines the return value using the call to
-`will_return()`. This is a declaration of an expectation of a call to
-the stub, and we have told our `stream_stub()` to always return `EOF`
-when called.
+<1> The `always_expect()` macro takes the function name as an argument and also defines the return value using the call to `will_return()`.
+This is a declaration of an expectation of a call to the stub, and we have told our `stream_stub()` to always return `EOF` when called.

Let's see if our production code actually works...

[source,c]
-----------------------
include::tutorial_src/stream1.out[]
-----------------------

-So far, so good. On to the next test.
+So far, so good.
+On to the next test.

-If we want to test a one character line, we have to send the
-terminating `EOF` or `"\n"` as well as the single character.
-Otherwise our code will loop forever, giving an infinite line of that
-character.
+If we want to test a one character line, we have to send the terminating `EOF` or `"\n"` as well as the single character.
+Otherwise our code will loop forever, giving an infinite line of that character.

Here is how we can do this... 
@@ -1457,11 +1241,9 @@ include::tutorial_src/stream_tests2.c[lines=19..25] ----------------------- -Unlike the `always_expect()` instruction, `expect()` sets up an -expectation of a single call and specifying `will_return()` sets the -single return value for just that call. It acts like a record and -playback model. Successive expectations map out the return sequence -that will be given back once the test proper starts. +Unlike the `always_expect()` instruction, `expect()` sets up an expectation of a single call and specifying `will_return()` sets the single return value for just that call. +It acts like a record and playback model. +Successive expectations map out the return sequence that will be given back once the test proper starts. We'll add this test to the suite and run it... @@ -1469,7 +1251,9 @@ include::tutorial_src/stream2.out[] ----------------------- -Oops. Our code under test doesn't work. Already we need a fix... +Oops. +Our code under test doesn't work. +Already we need a fix... [source,c] ----------------------- @@ -1485,15 +1269,11 @@ include::tutorial_src/stream3.out[] ----------------------- -So, how do the *Cgreen* stubs work? Each `expect()` describes one -call to the stub and a call to `will_return()` is included it will -aggregate into a list of return values which are used and returned in -order as the expected calls arrive. - -The `mock()` macro captures the parameter names and the `__func__` -property (the name of the stub function). *Cgreen* can then use these -to look up entries in the return list, and also to generate more -helpful messages. +So, how do the *Cgreen* stubs work? +Each `expect()` describes one call to the stub and when a call to `will_return()` is included, the return values will be collected and returned in order as the expected calls arrive. + +The `mock()` macro captures the parameter names, their values and the `__func__` property (the name of the stub function). 
+*Cgreen* can then use these to look up entries in the return list, and also to generate more helpful messages. We can now crank out our tests quite quickly... @@ -1502,22 +1282,19 @@ include::tutorial_src/stream_tests3.c[lines=27..33] ----------------------- -I've been a bit naughty. As each test runs in its own process, I -haven't bothered to free the pointers to the paragraphs. I've just -let the operating system do it. Purists may want to add the extra -clean up code. - -I've also used `always_expect()` for the last instruction. Without -this, if the stub is given an instruction it does not expect, it will -throw a test failure. This is overly restrictive, as our -`read_paragraph()` function could quite legitimately call the stream -after it had run off of the end. OK, that would be odd behaviour, but -that's not what we are testing here. If we were, it would be placed -in a test of its own. The `always_expect()` call tells *Cgreen* to -keep going after the first three letters, allowing extra calls. +I've been a bit naughty. +As each test runs in its own process, I haven't bothered to free the pointers to the paragraphs. +I've just let the operating system do it. +Purists may want to add the extra clean up code. + +I've also used `always_expect()` for the last instruction. +Without this, if the stub is given an instruction it does not expect, it will throw a test failure. +This is overly restrictive, as our `read_paragraph()` function could quite legitimately call the stream after it had run off of the end. +OK, that would be odd behaviour, but that's not what we are testing here. +If we were, it would be placed in a test of its own. +The `always_expect()` call tells *Cgreen* to keep going after the first three letters, allowing extra calls. -As we build more and more tests, they start to look like a -specification of the wanted behaviour... +As we build more and more tests, they start to look like a specification of the wanted behaviour... 
[source,c] ----------------------- @@ -1531,11 +1308,12 @@ include::tutorial_src/stream_tests4.c[lines=43..46] ----------------------- -This time we must not use `always_return()`. We want to leave the -stream where it is, ready for the next call to `read_paragraph()`. If -we call the stream beyond the line ending, we want to fail. +This time we must not use `always_return()`. +We want to leave the stream where it is, ready for the next call to `read_paragraph()`. +If we call the stream beyond the line ending, we want to fail. -Oops, that was a little too fast. Turns out we are failing anyway... +Oops, that was a little too fast. +Turns out we are failing anyway... ----------------------- include::tutorial_src/stream5.out[] @@ -1556,61 +1334,33 @@ ----------------------- There are no limits to the number of stubbed methods within a test, -only that two stubs cannot have the same name. The following will -cause problems... +only that two stubs cannot have the same name. +The following will cause problems... [source,c] ----------------------- include::tutorial_src/multiple_streams1.c[lines=10..-1] ----------------------- -You __could__ program the same stub to return values for the two -streams, but that would make a very brittle test. Since we'd be making -it heavily dependent on the exact internal behaviour that we are -trying to test, or test drive, it will break as soon as we change that -implementation. The test will also become very much harder to read and -understand. And we really don't want that. +You __could__ program the same stub to return values for the two streams, but that would make a very brittle test. +Since we'd be making it heavily dependent on the exact internal behaviour that we are trying to test, or test drive, it will break as soon as we change that implementation. +The test will also become very much harder to read and understand. And we really don't want that. 
-So, it will be necessary to have two stubs to make this test behave, -but that's not a problem... +So, it will be necessary to have two stubs to make this test behave, but that's not a problem... [source,c] ----------------------- include::tutorial_src/multiple_streams2.c[lines=10..-1] ----------------------- -We now have a way of writing fast, clear tests with no external -dependencies. The information flow is still one way though, from stub -to the code under test. When our code calls complex procedures, we -won't want to pick apart the effects to infer what happened. That's -too much like detective work. And why should we? We just want to -know that we dispatched the correct information down the line. - -Things get more interesting when we think of the traffic going the -other way, from code to stub. This gets us into the same territory as -mock objects. - - -=== Mocks with side effects - -Sometimes returning simple values is not enough. The function that you -want to mock might have some side effect, like setting a global error -code, or aggregate some data. - -Let's assume that the `reader` increments a counter every time it gets -called and we need to mimic that behaviour. There are many ways to do -this, but here is one using the side effect feature. It works by -calling a callback function that you provide, allowing you to -feed some data to it. - -We create the "side effect function" which needs to take a single -argument which should be a pointer to the "side effect data". You will -have to cast that datapointer to the correct type. +We now have a way of writing fast, clear tests with no external dependencies. +The information flow is still one way though, from stub to the code under test. +When our code calls complex procedures, we won't want to pick apart the effects to infer what happened. +That's too much like detective work. +And why should we? We just want to know that we dispatched the correct information down the line. 
-[source, c] ------------------------ -include::tutorial_src/side_effect.c[lines=29..44] ------------------------ +Things get more interesting when we think of the traffic going the other way, from code to stub. +This gets us into the same territory as mock objects. === Setting Expectations on Mock Functions @@ -1623,21 +1373,17 @@ include::tutorial_src/stream.c[lines=23..32] ----------------------- -This is the start of a formatter utility. Later filters will probably -break the paragaphs up into justified text, but right now that is all -abstracted behind the `void write(void *, char *)` interface. Our -current interests are: does it loop through the paragraphs, and does -it crash? - -We could test correct paragraph formation by writing a stub that -collects the paragraphs into a `struct`. We could then pick apart -that `struct` and test each piece with assertions. This approach is -extremely clumsy in C. The language is just not suited to building -and tearing down complex edifices, never mind navigating them with -assertions. We would badly clutter our tests. +This is the start of a formatter utility. +Later filters will probably break the paragaphs up into justified text, but right now that is all abstracted behind the `void write(void *, char *)` interface. +Our current interests are: does it loop through the paragraphs, and does it crash? + +We could test correct paragraph formation by writing a stub that collects the paragraphs into a `struct`. +We could then pick apart that `struct` and test each piece with assertions. +This approach is extremely clumsy in C. +The language is just not suited to building and tearing down complex edifices, never mind navigating them with assertions. +We would badly clutter our tests. -Instead we'll test the output as soon as possible, right in -the called function... +Instead we'll test the output as soon as possible, right in the called function... [source,c] ----------------------- @@ -1646,13 +1392,12 @@ ... 
----------------------- -By placing the assertions into the mocked function, we keep the tests -minimal. The catch with this method is that we are back to writing -individual functions for each test. We have the same problem as we -had with hand coded stubs. +By placing the assertions into the mocked function, we keep the tests minimal. +The catch with this method is that we are back to writing individual functions for each test. +We have the same problem as we had with hand coded stubs. -Again, *Cgreen* has a way to automate this. Here is the rewritten -test... +Again, *Cgreen* has a way to automate this. +Here is the rewritten test... [source,c] ----------------------- @@ -1661,36 +1406,449 @@ Where are the assertions? -Unlike our earlier stub, `reader()` can now check its parameters. In -object oriented circles, an object that checks its parameters as well -as simulating behaviour is called a mock object. By analogy -`reader()` is a mock function, or mock callback. - -Using the `expect` macro, we have set up the expectation that -`writer()` will be called just once. That call must have the string -`"a"` for the `paragraph` parameter. If the actual value of that -parameter does not match, the mock function will issue a failure -straight to the test suite. This is what saves us writing a lot of -assertions. +Unlike our earlier stub, `reader()` can now check its parameters. +In object oriented circles, an object that checks its parameters as well as simulating behaviour is called a mock object. +By analogy `reader()` is a mock function, or mock callback. + +Using the `expect()` macro, we have set up the expectation that `writer()` will be called just once. +That call must have the string `"a"` for the `paragraph` parameter. +If the actual value of that parameter does not match, the mock function will issue a failure straight to the test suite. +This is what saves us writing a lot of assertions. 
+
+=== Running Tests With Mocked Functions
+
+
+It's about time we actually ran our test...
+
+-----------------------
+include::tutorial_src/formatter1.out[]
+-----------------------
+
+Confident that a single character works, we can further specify the behaviour.
+Firstly an input sequence...
+
+[source,c]
+-----------------------
+include::tutorial_src/formatter_tests2.c[lines=25..34]
+-----------------------
+
+A more intelligent programmer than me would place all these calls in a
+loop.
+
+-------------------------
+include::tutorial_src/formatter2.out[]
+-------------------------
+
+Next, checking an output sequence...
+
+[source,c]
+-----------------------
+include::tutorial_src/formatter_tests3.c[lines=36..-1]
+-----------------------
+
+Again we can see that the `expect()` calls follow a record and playback model.
+Each one tests a successive call.
+This sequence confirms that we get `"a"`, `"b"` and `"c"` in order.
+
+-----------------------
+include::tutorial_src/formatter3.out[]
+-----------------------
+
+So, why the 5 passes?
+Each `expect()` with a constraint is actually an assert.
+It asserts that the call specified is actually made with the parameters given and in the specified order.
+In this case all the expected calls were made.
+
+Then we'll make sure the correct stream pointers are passed to the correct functions.
+This is a more realistic parameter check...
+
+[source,c]
+-----------------------
+include::tutorial_src/formatter_tests4.c[lines=49..-1]
+-----------------------
+
+-----------------------
+include::tutorial_src/formatter4.out[]
+-----------------------
+
+And finally we'll specify that the writer is not called if
+there is no paragraph.
+
+[source,c]
+-----------------------
+include::tutorial_src/formatter_tests5.c[lines=56..-1]
+-----------------------
+
+This last test is our undoing...
+
+-----------------------
+include::tutorial_src/formatter5.out[]
+-----------------------
+
+Obviously blank lines are still being dispatched to the `writer()`.
+Once this is pointed out, the fix is obvious...
+
+[source,c]
+-----------------------
+include::tutorial_src/stream2.c[lines=23..-1]
+-----------------------
+
+Tests with `never_expect()` can be very effective at uncovering subtle
+bugs.
+
+-----------------------
+include::tutorial_src/formatter6.out[]
+-----------------------
+
+All done.
+
+
+=== Mocks Are...
+
+Using mocks is a very handy way to isolate a unit by catching and
+controlling calls to external units.
+Depending on your style of coding two schools of thinking have emerged. And of course *Cgreen* supports both!
+
+
+==== Strict or Loose Mocks
+
+The two schools are thinking a bit differently about what mock expectations mean.
+Does it mean that all external calls must be declared and expected?
+What happens if a call was made to a mock that wasn't expected?
+And vice versa, if an expected call was not made?
+
+Actually, the thinking is not only a school of thought, you might want to switch from one to the other depending on the test.
+So *Cgreen* allows for that too.
+
+By default *Cgreen* mocks are 'strict', which means that a call to a non-expected mock will be considered a failure.
+So will an expected call that was not fulfilled.
+You might consider this a way to define a unit through all its exact behaviours towards its neighbours.
+
+On the other hand, 'loose' mocks are looser.
+They allow both unfulfilled expectations and try to handle unexpected calls in a reasonable way.
+
+You can use both within the same suite of tests using the call `cgreen_mocks_are(strict_mocks);` and `cgreen_mocks_are(loose_mocks);` respectively.
+Typically you would place that call at the beginning of the test, or in a setup or `BeforeEach()` if it applies to all tests in a suite.
+
+
+==== Learning Mocks
+
+Working with legacy code and trying to apply TDD, BDD, or even simply adding some unit tests, is not easy.
+You're working with unknown code that does unknown things with unknown counterparts.
+
+So the first step would be to isolate the unit.
+We won't go into details on how to do that here, but basically you would replace the interface to other units with mocks.
+This is a somewhat tedious manual labor, but will result in an isolated unit where you can start applying your unit tests.
+
+Once you have your unit isolated in a harness of mocks, we need to figure out which calls it does to other units, now replaced by mocks, in the specific case we are trying to test.
+
+This might be complicated, so *Cgreen* can make that a bit simpler.
+There is a third 'mode' of the *Cgreen* mocks, the __learning mocks__.
+
+If you temporarily add the call `cgreen_mocks_are(learning_mocks);` at the beginning of your unit test, the mocks will record all calls and present a list of those calls in order, including the actual parameter values, on the standard output.
+
+So let's look at the following example from the *Cgreen* unit tests.
+It's a bit contorted since the test actually calls the mocked functions directly, but I believe it will serve as an example.
+
+[source,c]
+-----
+include::tutorial_src/learning_mocks.c[lines=8..-1]
+-----
+
+We can see the call to `cgreen_mocks_are()` starting the test and
+setting the mocks into learning mode.
+
+If we run this, just as we usually run tests, the following will show
+up in our terminal:
+
+// This needs to be copied and pasted from the output of make in tutorial_src
+// At least, I couldn't make 'make' capture it into the output file
+// It ought to be:
+// include::tutorial_src/learning_mocks.out[]
+----
+Running "learning_mocks" (1 tests)...
+LearningMocks -> emit_pastable_code : Learned mocks are + expect(string_out, when(p1, is_equal_to(1))); + expect(string_out, when(p1, is_equal_to(2))); + expect(integer_out); + expect(integer_out); + expect(string_out, when(p1, is_equal_to(3))); + expect(integer_out); +Completed "LearningMocks": 0 passes, 0 failures, 0 exceptions. +Completed "learning_mocks": 0 passes, 0 failures, 0 exceptions. +---- + +If this was for real we could just copy this and paste it in place of +the call to `cgreen_mocks_are()` and we have all the expectations +done. + +NOTE: Before you can do this you need to implement the mock functions, of course. +I.e. write functions that replaces the real functions and instead calls `mock()`. + +NOTE: If a test fails with an exception, you won't get the learned calls unfortunately. +They are collected and printed at the end of the test. +This might be improved at some future time. + +TIP: You can try the `cgreen-mocker` for this, as described in <>. + + +== More on `expect()` and `mock()` + +=== Important Things To Remember About `expect()` and `mock()` + +Using `expect()` and `mock()` is a very powerful way to isolate your code under test from its dependencies. +But it is not always easy to follow what happens, and when. + +Here are some important things to remember when working with *Cgreen* mocks. 
+
+- calls to `expect()` collect constraints and any other required information when it is called
+- this also goes for `will_return()` which will save the value of its parameter _when it is called_
+- the actual evaluation and execution of those constraints occur when `mock()` is called in the function named in the `expect()` call(s)
+- calls to a function specified by the `expect()` calls are evaluated in the same order as the ``expect()``s were executed, but only for that named function
+- the lexical scope of the first parameter in a `when()` is always inside the mocked function where the `mock()` call is made
+- the lexical scope of arguments to an `is_equal_to...()` is where that call is made
+
+IMPORTANT: In summary, `expect()` does early collection, including evaluation of return value expression, and `mock()` does late evaluation of the constraints collected against the given arguments to `mock()`.
+
+[[refactoring-tests]]
+=== Refactoring Tests with Mocks - CAUTION!
+
+After a while you are bound to get tests with calls to `expect()`.
+You might even have common patterns in multiple tests.
+So your urge to refactor starts to set in.
+And that is good, go with it, we have tests to rely on.
+
+But there are a lot of things going on behind the scenes when you use *Cgreen*, often with the help of some serious macro-magic, so special care needs to be taken when refactoring tests that have `expect()` in them.
+
+==== Renaming
+
+The first "gotcha" is when you rename a function that you mock.
+You are likely to have `expect()`s for that function too.
+
+CAUTION: the function name in an `expect()` is "text" so it will not be caught by a refactoring tool.
+You will need to change the name there manually.
+ +==== Local Variables + +For example, consider this code + +[source,c] +----------------------------- +Ensure(Readline, can_read_some_characters) { + char canned_a = 'a'; + char canned_b = 'b'; + char canned_c = 'c'; + + expect(mocked_read, + will_set_contents_of_parameter(buf, &canned_a, sizeof(char)), + will_return(1)); + expect(mocked_read, + will_set_contents_of_parameter(buf, &canned_b, sizeof(char)), + will_return(1)); + expect(mocked_read, + will_set_contents_of_parameter(buf, &canned_c, sizeof(char)), + will_return(1)); + + ... + + ... +----------------------------- + +It is very tempting to break out the common expect: + +[source,c] +----------------------------- +static void expect_char(char ch) { + expect(mocked_read, + will_set_contents_of_parameter(buf, &ch, sizeof(char)), + will_return(1)); +} + +Ensure(Readline, can_read_some_characters) { + char canned_a = 'a'; + char canned_b = 'b'; + char canned_c = 'c'; + + expect_char(canned_a); + expect_char(canned_b); + expect_char(canned_c); + + ... + + ... +----------------------------- + +Much nicer, right? + +This will most likely lead to a segmentation fault or illegal memory reference, something that can be really tricky to track down. +The problem is that when `mocked_read()` is actually called, as an effect of calling something that calls `mocked_read()`, the parameter `ch` to the nicely extracted `expect_char()` does not exist anymore. + +Good thing that you run the tests after each and every little refactoring, right? +Because then you know that it was the extraction you just did that was the cause. +Then you can come here and read up on what the problem might be and what to do about it. + +At first glance the fix might look easy: + +[source,c] +----------------------------- +static void expect_char(char ch) { + char saved_ch = ch; + expect(mocked_read, + will_set_contents_of_parameter(buf, &saved_ch, sizeof(char)), + will_return(1)); +} + +Ensure(Readline, can_read_some_characters) { + ... 
+----------------------------- + +Close! But the local variable is also gone at the call to `mocked_read()`. Of course. + +Ok, so let's make it static: + +[source,c] +----------------------------- +static void expect_char(char ch) { + static char saved_ch = ch; + expect(mocked_read, + will_set_contents_of_parameter(buf, &saved_ch, sizeof(char)), + will_return(1)); +} + +Ensure(Readline, can_read_some_characters) { + ... +----------------------------- + +Ok, so then it must exist. +But the problem then becomes the three consequtive calls to `expect_char()`. + +[source,c] +----------------------------- +Ensure(Readline, can_read_some_characters) { + char canned_a = 'a'; + char canned_b = 'b'; + char canned_c = 'c'; + + expect_char(canned_a); + expect_char(canned_b); + expect_char(canned_c); + + ... + + ... +----------------------------- + +Each of those have a different actual parameter, which is hard to store in one variable. +Even if it is static. + +The solution is now quite obvious: + +[source,c] +----------------------------- +static void expect_char(char *ch_p) { + expect(mocked_read, + will_set_contents_of_parameter(buf, ch_p, sizeof(char)), + will_return(1)); +} + +Ensure(Readline, can_read_some_characters) { + char canned_a = 'a'; + char canned_b = 'b'; + char canned_c = 'c'; + + expect_char(&canned_a); + expect_char(&canned_b); + expect_char(&canned_c); + + ... + + ... +----------------------------- + +By using pointers to the variables in the test, we can ensure that the values are live when the expected call is made. +So we don't have to make the character variables used in the test static, because as local variables those will remain live long enough. + +And this is the moral here, you cannot use local variables in an extracted function as data for a mocked function call. + +CAUTION: Variables that are to be sent to a mocked function MUST be live at the call to that mocked function. 
+
+
+=== Other Use Cases For Mocks
+
+==== Out Parameters
-When specifying behavior of mocks there are three parts. First, how
-often the specified behaviour or expectation will be executed:
+In C all function parameters are by value so if a function needs to return a value through a parameter that has to be done using a pointer.
+Typically this is a pointer to the area or variable the function should fill.
+
+*Cgreen* provides `will_set_contents_of_parameter()` to handle this use case.
+For example
+
+[source, c]
+-----------------------
+include::tutorial_src/set_contents.c[lines=3..12]
+...
+-----------------------
+
+
+When the mock for `convert_to_uppercase()` is called it will write the string "UPPER CASE" in the area pointed to by `converted_string`.
+
+==== Setting fields
+
+Sometimes you need to set a field in a struct sent by reference to a mocked function.
+You cannot use the `will_set_contents_of_parameter()` directly since you can't, or even don't want to, know the complete information in the structure.
+But with a little bit of boilerplate in your mock function you can still write to a single field.
+
+In the mock function you need to create a local variable that points to the field you want to update.
+You can then use this pointer variable in the mock call to supplement the real parameters.
+
+This local variable will then be accessible in `expect()` calls as if it was a parameter, and you can use it to write data to where it points, which then should be the field in the incoming structure.
+
+[source, c]
+-----------------------
+include::tutorial_src/set_field.c[lines=3..18]
+...
+-----------------------
+
+The local variable `field` in the mock function is set to point to the field that we need to update.
+It is then exposed by including it in the `mock()` call, and `will_set_contents_of_parameter()` will use it to update whatever it points to with the data provided in the `expect()`.
+ +NOTE: Both the local variable and the data argument in the call to `will_set_contents_of_parameter()` must be pointers. +You cannot use literals as data, except when it is a string literal which as per C convention is converted to a pointer. + +==== Side Effects + +Sometimes returning simple values is not enough. +The function that you want to mock might have some side effect, like setting a global error code, or aggregate some data. + +Let's assume that the `reader` increments a counter every time it gets called and we need to mimic that behaviour. +There are many ways to do this, but here is one using the side effect feature. +It works by calling a callback function that you provide, allowing you to feed some data to it. + +We create the "side effect function" which needs to take a single argument which should be a pointer to the "side effect data". +You will have to cast that datapointer to the correct type. + +[source, c] +----------------------- +include::tutorial_src/side_effect.c[lines=29..44] +----------------------- + +=== The Mock Macros + +When specifying behavior of mocks there are three parts. +First, how often the specified behaviour or expectation will be executed: |======================================================================= |*Macro* |*Description* -|`expect(function, ...)` |Expected once, in order +|`expect(function, ...)` |Expected once, in the specified order, for the same function |`always_expect(function, ...)`|Expect this behavior from here onwards -|`never_expect(function)` |From this point this mock function must never be called +|`never_expect(function)` |From this point this mocked function must never be called |======================================================================= -You can specify constraints and behaviours for each expectation -(except for `never_expect()` naturally). 
A constraint places -restrictions on the parameters (and will tell you if the expected -restriction was not met), and a behaviour specifies what the mock -should do if the parameter constraints are met. +You can specify constraints and behaviours for each expectation (except for `never_expect()` naturally). +A constraint places restrictions on the parameters (and will tell you if the expected restriction was not met), and a behaviour specifies what the mock should do if the parameter constraints are met. -A parameter constraint is defined using the `when(parameter, -constraint)` macro. It takes two parameters: +A parameter constraint is defined using the `when(parameter, constraint)` macro. +It takes two parameters: |================================================= |*Parameter* |*Description* @@ -1698,8 +1856,7 @@ |`constraint`|A constraint placed on that parameter |================================================= -There is a multitude of constraints available (actually, exactly the -same as for the assertions we saw earlier): +There is a multitude of constraints available (actually, exactly the same as for the assertions we saw earlier): |========================================================================== |*Constraint* |*Type* @@ -1723,27 +1880,34 @@ |`is_greater_than_double(value)` |Double |========================================================================== -For the double valued constraints you can set the number of -significant digits to consider a match with a call to -`significant_figures_for_assert_double_are(int figures)`. -The <> has a more detailed -discussion of the algorithm used for comparing floating point numbers. +For the double valued constraints you can set the number of significant digits to consider a match with a call to `significant_figures_for_assert_double_are(int figures)`. +The <> has a more detailed discussion of the algorithm used for comparing floating point numbers. 
-Then there are a couple of ways to return results from the mock: +Then there are a couple of ways to return results from the mocks. +They all provide ways to return various types of values through `mock()`. +In your mocked function you can then simply return that value, or manipulate it as necessary. |=========================================================================================== -|*Macro* |*Description* -|`will_return(value)` |Return the value from the mock function (which needs to be declared returning that type) -|`will_return_by_value(value, size)` |Ditto for generic by value variables. -|`will_return_double(value)` |Ditto for double values (required because of C's type coercion rules which would otherwise convert a double into an int) -|`will_set_contents_of_parameter(parameter_name, pointer_to_value, size)`|Writes 'size' bytes from the pointed out value into the referenced parameter -|`with_side_effect(pointer_to_function, pointer_to_data)` |Executes the side effect function and passes data to it +|*Macro* |*`mock()` will...* +|`will_return(value)` | return `value`, for integer types +|`will_return_double(value)` | return `value` as a "boxed double", for double floats (required because of C's type coercion rules which would otherwise convert a double into an int) +|`will_return_by_value(struct, size)` | return a pointer to an allocated copy of the `struct` that can be copied and returned by value from the mocked function +|`will_set_contents_of_parameter(parameter_name, pointer_to_value, size)`| write `size` bytes from the pointed out value (`pointer_to_value`) into where the referenced out parameter (`parameter_name`) is pointing +|`will_capture_parameter(parameter_name, local_variable)` |capture the value of the parameter and store it in the named local variable +|`with_side_effect(function, pointer_to_data)` | call the side effect `function` and pass `pointer_to_data` to it 
|=========================================================================================== -Note: The `pointer_to_data` passed to `with_side_effect` need to fit inside a `intptr_t` +NOTE: *`will_return_double()`*: The "boxed double" returned by `mock()` have to be "unboxed" by the caller see <> for details. + +NOTE: *`will_return_by_value`*: The memory allocated for the copy of the struct returned by `mock()` needs to be deallocated by the caller or it will be lost. You can do this with the code in the `Box` example below. -You can combine these in various ways: +NOTE: *`will_set_contents_of_parameter`*: The data to set must be correct at the time of the call to the mock function, and not be overwritten or released between the call to the `expect()` and the mock function. See <> for details. + +NOTE: *`will_capture_parameter`*: The local variable to capture the value in must be live at the time of the call to the mock function, so using a local variable in a function called by your test will not work. See <> for details. + +=== Combining Expectations + +You can combine the expectations for a `mock()` in various ways: [source,c] ----------------------- @@ -1757,16 +1921,12 @@ will_set_contents_of_parameter(status, FD_CLOSED, sizeof(bool)))); ----------------------- -If multiple `when()` are specified they all need to be fullfilled. You -can of course only have one for each of the parameters of your mock -function. - -You can also have multiple `will_set_contents_of_parameter()` in an -expectation, one for each reference parameter, but naturally only one -`will_return()`. +If multiple `when()` are specified they all need to be fullfilled. +You can of course only have one for each of the parameters of your mock function. + +You can also have multiple `will_set_contents_of_parameter()` in an expectation, one for each reference parameter, but naturally only one `will_return()`. 
-To ensure that a specific call happens `n` times the macro `times(number_times_called)` can be passed -as a constraint to a specific call: +To ensure that a specific call happens `n` times the macro `times(number_times_called)` can be passed as a constraint to a specific call: [source,c] ----------------------- @@ -1775,7 +1935,19 @@ times(1)); ----------------------- -This feature only works for `expect`. +This feature only works for `expect()`. + +=== Order of constraints + +When you have multiple constraints in an `expect` the order in which they are executed is not always exactly then order in which they where given. + +First all constraints are inspected for validity, such as if the parameter name given cannot be found, but primarily to see if the parameters, if any, matche the actual parameters in the call. + +Then all read-only constraints are processed, followed by constraints that set contents. + +Finally all side effect constraints are executed. + +=== Order of multiple `expect`s The expections still need to respect the order of calling, so if we call the function `mocker_file_writer` with the following pattern: @@ -1803,216 +1975,107 @@ times(1)); ----------------------- -If the function we are mocking returns structs by value our mock -function need to do that too. So we need use another return function, -`will_return_by_value`. Here is some example code using an imaginary -struct typdef'ed as `Box` and a corresponding function, -`retrieve_box()`, which we want to mock: -[source,c] ------------------------ - expect(retrieve_box, - will_return_by_value(box, sizeof(Box)); ------------------------ +=== Returning `struct` + +If the function we are mocking returns structs by value, then our mock function need to do that too. +To do this we must use specific return macro, `will_return_by_value()`. +Below is some example code using an imaginary struct typedef'ed as `Struct` and a corresponding function, `retrieve_struct()`, which we want to mock. 
-And the mock function will then look like this: +The underlying mechanism of this is that in the test we create the struct that we want to return. +The macro `will_return_by_value()` then copies that to a dynamically allocated area, saving it so that a pointer to that area can be returned by `mock()`. [source,c] ----------------------- -Box retrieve_box() { - return *(Box *)mock(); -} + Struct returned_struct = {...}; + expect(retrieve_struct, + will_return_by_value(returned_struct, sizeof(Struct)); + /* `returned_struct` has been copied to an allocated area */ ----------------------- -=== Running tests with mocked functions +NOTE: In some future version the `size` argument will be removed from `will_return_by_value()` size since the macro can easily calculate that for you. -It's about time we actually ran our test... ------------------------ -include::tutorial_src/formatter1.out[] ------------------------ - -Confident that a single character works, we can further specify the -behaviour. Firstly an input sequence... +The mock function will then look like this: [source,c] ----------------------- -include::tutorial_src/formatter_tests2.c[lines=25..34] +Struct retrieve_struct() { + return *(Struct *)mock(); /* De-reference the returned pointer to the allocated area */ +} ----------------------- -A more intelligent programmer than me would place all these calls in a -loop. +This would cause a memory leak since the area allocated by the `return_by_value()` macro is not deallocated. +And in many scenarious this might not be a big problem, and you could make do with that simple version. -------------------------- -include::tutorial_src/formatter2.out[] -------------------------- - -Next, checking an output sequence... +In case we wanted to be sure, we should free the area automatically allocated by `will_return_by_value()`. +The pointer returned by `mock()` will point to that area. 
+So, here's a better, although slightly more complicated, version: [source,c] ----------------------- -include::tutorial_src/formatter_tests3.c[lines=36..-1] +Struct retrieve_struct() { + Struct *struct_p = (Struct *)mock(); /* Get the pointer */ + Struct the_struct = *struct_p; /* Dereference to get a struct */ + free(struct_p); /* Deallocate the returned area */ + return the_struct; /* Finally we can return the struct by value */ +} ----------------------- -Again we can se that the `expect()` calls follow a record and playback -model. Each one tests a successive call. This sequence confirms that -we get `"a"`, `"b"` and `"c"` in order. ------------------------ -include::tutorial_src/formatter3.out[] ------------------------ - -So, why the 5 passes? Each `expect()` with a constrait is actually -an assert. It asserts that the call specified is actually made with -the parameters given and in the specified order. In this case all the -expected calls were made. +=== Mocking `struct` Parameters -Then we'll make sure the correct stream pointers are passed to the -correct functions. This is a more realistic parameter check... +Modern C standards allows function parameters to be ``struct``s by value. +Since our `mock()` only can handle scalar values this presents a bit of a conundrum. [source,c] ----------------------- -include::tutorial_src/formatter_tests4.c[lines=49..-1] +include::tutorial_src/struct_parameters.c[lines=11..18] ----------------------- ------------------------ -include::tutorial_src/formatter4.out[] ------------------------ +And we also can not compare a non-scalar value with any of the `is_equal_to...()` constraint macros in the `expect()` call. +Also remember that the C language does not allow comparing non-scalar values using `==`. -And finally we'll specify that the writer is not called if -there is no paragraph. +There are a couple of ways to handle this and which one to select depends on what you want to do. 
-[source,c] ------------------------ -include::tutorial_src/formatter_tests5.c[lines=56..-1] ------------------------ +==== Checking Single `struct` Fields -This last test is our undoing... +In an `expect(when())` we probably want to check one, or more, of the fields in the struct. ------------------------ -include::tutorial_src/formatter5.out[] ------------------------ - -Obviously blank lines are still being dispatched to the `writer()`. -Once this is pointed out, the fix is obvious... +Since `mock()` actually can "mock" anything we can use a normal field expression to access the value we want to check: [source,c] ----------------------- -include::tutorial_src/stream2.c[lines=23..-1] +include::tutorial_src/struct_parameters.c[lines=26..28] ----------------------- -Tests with `never_expect()` can be very effective at uncovering subtle -bugs. +The trick here is that `mock()` just saves the "name", as a string, given as the argument, in this case "s.i", and pair it with the value of that expression. +There is no requirement that the "name" is actually a parameter, it can be anything. +The only thing to remember is that the exact same string needs to be used when invoking `when()`: +[source,c] ----------------------- -include::tutorial_src/formatter6.out[] +include::tutorial_src/struct_parameters.c[lines=35..36] ----------------------- -All done. - - -=== Mocks Are... - -Using mocks is a very handy way to isolate a unit by catching and -controlling calls to external units. Depending on your style of coding -two schools of thinking have emerged. And of course *Cgreen* supports -both! - - -==== Strict or Loose Mocks - -The two schools are thinking a bit differently about what mock -expectations means. Does it mean that all external calls must be -declared and expected? What happens if a call was made to a mock that -wasn't expected? And vice versa, if an expected call was not made? 
- -Actually, the thinking is not only a school of thought, you might want -to switch from one to the other depending on the test. So *Cgreen* -allows for that too. - -By default *Cgreen* mocks are 'strict', which means that a call to -an non-expected mock will be considered a failure. So will an expected -call that was not fullfilled. You might consider this a way to define -a unit through all its exact behaviours towards its neighbours. - -On the other hand, 'loose' mocks are looser. They allow both -unfulfilled expectations and try to handle unexpected calls in a -reasonable way. - -You can use both with in the same suite of tests using the call -`cgreen_mocks_are(strict_mocks);` and `cgreen_mocks_are(loose_mocks);` -respectively. Typically you would place that call at the beginning of -the test, or in a setup or `BeforeEach()` if it applies to all tests -in a suite. - - -==== Learning Mocks - -Working with legacy code and trying to apply TDD, BDD, or even simply -adding some unit tests, is not easy. You're working with unknown code -that does unknown things with unknown counterparts. - -So the first step would be to isolate the unit. We won't go into -details on how to do that here, but basically you would replace the -interface to other units with mocks. This is a somewhat tedious manual -labor, but will result in an isolated unit where you can start -applying your unit tests. - -Once you have your unit isolated in a harness of mocks, we need to -figure out which calls it does to other units, now replaced by mocks, -in the specific case we are trying to test. - -This might be complicated, so *Cgreen* makes that a bit simpler. There -is a third 'mode' of the *Cgreen* mocks, the __learning mocks__. - -If you temporarily add the call `cgreen_mocks_are(learning_mocks);` at -the beginning of your unit test, the mocks will record all calls and -present a list of those calls in order, including the actual parameter -values, on the standard output. 
- -So let's look at the following example from the *Cgreen* unit -tests. It's a bit contorted since the test actually call the mocked -functions directly, but I believe it will serve as an example. +You can do this with as many fields as you need. +And there is no (reasonable) limit to how many arguments `mock()` can take, so you can start with the ones that you require and add more as you need them. [source,c] ------ -include::tutorial_src/learning_mocks.c[lines=8..-1] ------ - -We can see the call to `cgreen_mocks_are()` starting the test and -setting the mocks into learning mode. - -If we run this, just as we usually run tests, the following will show -up in our terminal: - -// This needs to be copied and pasted from the output of make in tutorial_src -// At least, I couldn't make 'make' capture it into the output file -// It ought to be: -// include::tutorial_src/learning_mocks.out[] ----- -Running "learning_mocks" (1 tests)... -LearningMocks -> emit_pastable_code : Learned mocks are - expect(string_out, when(p1, is_equal_to(1))); - expect(string_out, when(p1, is_equal_to(2))); - expect(integer_out); - expect(integer_out); - expect(string_out, when(p1, is_equal_to(3))); - expect(integer_out); -Completed "LearningMocks": 0 passes, 0 failures, 0 exceptions. -Completed "learning_mocks": 0 passes, 0 failures, 0 exceptions. ----- - -If this was for real we could just copy this and paste it in place of -the call to `cgreen_mocks_are()` and we have all the expectations -done. +----------------------- +include::tutorial_src/struct_parameters.c[lines=42..55] +----------------------- -NOTE: Before you can do this you need to implement the mock functions, of -course. I.e. write functions that replaces the real -functions and instead calls `mock()`. +NOTE: In both example we use an explicit value in `will_return()` instead of the value of the field, "s.i". +That is because it is not possible to use the value of a mocked value in `will_return()`. 
+Remember, `expect()` does early collection. +At the time of executing it, there is no parameter available, so the value must come from that run-time environment. +Also, since we already explicitly know the value, we have to use it in the `when()` clause, there will be no uncertainty of what it should be. +The only concern might be duplication of an explicit value, but that is not a big problem in a unittest, clarity over DRY, and you can easily fix that with a suitably named local variable. -TIP: You can try the `cgreen-mocker` for this, as described in -<>. +=== Capturing Parameters +TBD. == Special Cases @@ -2023,20 +2086,17 @@ http://www.hostettler.net/blog/2014/05/18/fakes-stubs-dummy-mocks-doubles-and-all-that/[test doubles] here, but about values of C/C++ ``double`` type (a.k.a double float.) -*Cgreen* is designed to make it easy and natural to write assertions -and expectations. Many functions can be used for multiple data types, -e.g. `is_equal_to()` applies to all integer type values, actually -including pointers. - -But the C language has its quirks. One of them is the fact that it is -impossible to inspect the datatypes of values during run-time. This -has e.g. forced the introduction of `is_equal_to_string()` to enable -string comparisons. +*Cgreen* is designed to make it easy and natural to write assertions and expectations. +Many functions can be used for multiple data types, e.g. `is_equal_to()` applies to all integer type values, actually including pointers. + +But the C language has its quirks. +One of them is the fact that it is impossible to inspect the datatypes of values during run-time. +This has e.g. forced the introduction of `is_equal_to_string()` to enable string comparisons. ==== Assertions and Constraints -When it comes to double typed values this has spilled over even further. For -double typed values we have +When it comes to double typed values this has spilled over even further. 
+For double typed values we have |========================================================================== | *Constraint* @@ -2060,8 +2120,8 @@ | `significant_figures_for_assert_double_are(int figures)` |==================== -And of course they are designed to go together. So, if you want to assert -an expression yeilding a `double` typed value, you need to combine them: +And of course they are designed to go together. +So, if you want to assert an expression yielding a `double` typed value, you need to combine them: [source,c] ----------------------------- @@ -2079,31 +2139,22 @@ [[double_mocks]] -==== Mocks +==== Double Mocks + +The general mechanism *Cgreen* uses to transport values to and from mock functions is based on the simple idea that most types fit into a "large" integer and can be type converted to and from whatever type you need. -The general mechanism *Cgreen* uses to transport values to and from -mock functions is based on the simple idea that most types fit into a -"large" integer and can be type converted to and from whatever type -you need. - -Since a `double float` will not fit into the same memory space as an -integer *Cgreen* handles that by encapsulating ("boxing") the `double` -into an area which is represented by the pointer to it. And that -pointer can fit into the integer type value (`intptr_t`) that *Cgreen* -uses to transport values into and out of `mock()`. To get the value -back you "unbox" it. +Since a `double float` will not fit into the same memory space as an integer *Cgreen* handles that by encapsulating ("boxing") the `double` into an area which is represented by the pointer to it. +And that pointer can fit into the integer type value (`intptr_t`) that *Cgreen* uses to transport values into and out of `mock()`. +To get the value back you "unbox" it. -There are two possible uses of `double` that you need to be -aware of +There are two possible uses of `double` that you need to be aware of -1. 
When a parameter to the mocked function is of `double` type and -needs to be matched in an constraint in an `expect` call. +1. When a parameter to the mocked function is of `double` type and needs to be matched in a constraint in an `expect()` call. 2. When the mock function itself should return a `double` type value. -In the test you should use the special `double` type constraints and -the `will_return_double()` convenience function. In the mock function -you will have to take care to box and unbox as required. +In the test you should use the special `double` type constraints and the `will_return_double()` convenience function. +In the mock function you will have to take care to box and unbox as required. |================================================================= |*Boxing and unboxing in mock functions* | *Description* @@ -2125,37 +2176,26 @@ `double`, it will have to be used as `box_double(d)` in the call to `mock()`. <2> The corresponding `expect()` uses a double constraint. -<3> The mock function in this small example also returns a `double`. The -`expect()` uses `will_return_double()` so the mock function needs to -unbox the return value from `mock()` to be able to return the `double` -type value. +<3> The mock function in this small example also returns a `double`. +The `expect()` uses `will_return_double()` so the mock function needs to unbox the return value from `mock()` to be able to return the `double` type value. -NOTE: Strange errors may occur if you box and/or unbox or combine -`double` constraints incorrectly. +NOTE: Strange errors may occur if you box and/or unbox or combine `double` constraints incorrectly. [[floating_point_comparison_algorithm]] -==== Details of floating point comparison algorithm +==== Details of Floating Point Comparison Algorithm -The number of significant digits set with -`significant_figures_for_assert_double_are()` specifies a _relative_ -tolerance. 
Cgreen considers two double precision numbers +x+ and +y+ -equal if their difference normalized by the larger of the two is -smaller than +10^(1 - significant_figures)^+. Mathematically, we check -that +|x - y| < max(|x|, |y|) * 10^(1 - significant_figures)^+. - -Well documented subtleties arise when comparing floating point numbers -close to zero using this algorithm. The article -https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/[Comparing -Floating Point Numbers, 2012 Edition] by Bruce Dawson has an excellent -discussion of the issue. The essence of the problem can be appreciated -if we consider the special case where +y == 0+. In that case, our -condition reduces to +|x| < |x| * 10^(1 - significant_figures)^+. -After cancelling +|x|+ this simplifies to +1 < 10^(1 - -significant_figures)^+. But this is only true if +significant_figures -< 1+. In words this can be summarized by saying that, in a relative -sense, _all_ numbers are very different from zero. To circumvent this -difficulty we recommend to use a constraint of the following form when -comparing numbers close to zero: +The number of significant digits set with `significant_figures_for_assert_double_are()` specifies a _relative_ tolerance. +Cgreen considers two double precision numbers +x+ and +y+ equal if their difference normalized by the larger of the two is smaller than +10^(1 - significant_figures)^+. +Mathematically, we check that +|x - y| < max(|x|, |y|) * 10^(1 - significant_figures)^+. + +Well documented subtleties arise when comparing floating point numbers close to zero using this algorithm. +The article https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/[Comparing Floating Point Numbers, 2012 Edition] by Bruce Dawson has an excellent discussion of the issue. +The essence of the problem can be appreciated if we consider the special case where +y == 0+. 
+In that case, our condition reduces to +|x| < |x| * 10^(1 - significant_figures)^+. +After cancelling +|x|+ this simplifies to +1 < 10^(1 - significant_figures)^+. +But this is only true if +significant_figures < 1+. +In words this can be summarized by saying that, in a relative sense, _all_ numbers are very different from zero. +To circumvent this difficulty we recommend using a constraint of the following form when comparing numbers close to zero: [source,c] ----------------------- @@ -2165,48 +2205,36 @@ === Using Cgreen with C{pp} -The examples in this guide uses the C langauge to shows how to use -*CGreen*. You can also use *CGreen* with C++. +The examples in this guide use the C language to show how to use *CGreen*. You can also use *CGreen* with C++. -NOTE: The following needs expansion and more details as the support -for C++ is extended. +NOTE: The following needs expansion and more details as the support for C++ is extended. All you have to do is * Use the `cgreen` namespace by adding `using namespace cgreen;` at the beginning of the file with your tests -There is also one extra feature when you use C++, the `assert_throws` -function. +There is also one extra feature when you use C++, the `assert_throws` function. -NOTE: If you use the runner, as described in <>, and thus link -your tests into a shared library, don't forget to link it with the -same C++ library that was used to create the `cgreen-runner`. +NOTE: If you use the autodiscovering runner, as described in <>, and thus link your tests into a shared library, don't forget to link it with the same C++ library that was used to create the `cgreen-runner`. == Context, System Under Test & Suites -As mentioned earlier, *Cgreen* promotes the behaviour driven -style of test driving code. The thinking behind BDD is that we don't -really want to test anything, if we just could specify the behaviour -of our code and ensure that it actually behaves this way we would be -fine. 
- -This might seem like an age old dream, but when you think about it, -there is actually very little difference in the mechanics from -vanillla TDD. First we write how we want it, then implement it. But -the small change in wording, from `test´ to `behaviour´, from `test -that´ to `ensure that´, makes a huge difference in thinking, and also -very often in quality of the resulting code. +As mentioned earlier, *Cgreen* promotes the behaviour driven style of test driving code. +The thinking behind BDD is that we don't really want to test anything, if we just could specify the behaviour of our code and ensure that it actually behaves this way we would be fine. + +This might seem like an age old dream, but when you think about it, there is actually very little difference in the mechanics from vanillla TDD. +First we write how we want it, then implement it. +But the small change in wording, from `test´ to `behaviour´, from `test that´ to `ensure that´, makes a huge difference in thinking, and also very often in quality of the resulting code. === The SUT - System Under Test -Since BDD talks about behaviour, there has to be something that we can -talk about as having that wanted behaviour. This is usually called the -SUT, the System Under Test. The "system" might be whatever we are testing, -such as a C module ("MUT"), class ("CUT"), object ("OUT"), function ("FUT") -or method ("MUT"). We will stick with SUT in this document. To use *Cgreen* -in BDD-ish mode you must define a name for it. +Since BDD talks about behaviour, there has to be something that we can talk about as having that wanted behaviour. +This is usually called the SUT, the System Under Test. +The "system" might be whatever we are testing, such as a C module ("MUT"), class ("CUT"), object ("OUT"), function ("FUT") or method ("MUT"). +We will stick with SUT in this document. +To use *Cgreen* in BDD-ish mode you must define a name for it. 
[source, c] ----------------------- @@ -2214,10 +2242,9 @@ Describe(SUT); ----------------------- -*Cgreen* supports C++ and there you naturally have the objects and -also the Class Under Test. But in plain C you will have to think about -what is actually the "class" under test. E.g. in `sort_test.c` you might -see +*Cgreen* supports C++ and there you naturally have the objects and also the Class Under Test. +But in plain C you will have to think about what is actually the "class" under test. +E.g. in `sort_test.c` you might see [source, c] --------------------- @@ -2229,27 +2256,20 @@ } --------------------- -In this example you can clearly see what difference the BDD-ish style -makes when it comes to naming. Convention, and natural language, -dictates that typical names for what TDD would call tests, now starts -with 'can' or 'finds' or other verbs, which makes the specification so -much easier to read. - -Yes, I wrote 'specification'. Because that is how BDD views what TDD -basically calls a test suite. The suite specifies the behaviour of a -`class´. (That's why some BDD frameworks draw on 'spec', like -*RSpec*.) +In this example you can clearly see what difference the BDD-ish style makes when it comes to naming. +Convention, and natural language, dictates that typical names for what TDD would call tests, now starts with 'can' or 'finds' or other verbs, which makes the specification so much easier to read. + +Yes, I wrote 'specification'. Because that is how BDD views what TDD basically calls a test suite. The suite specifies the behaviour of a `class´. +(That's why some BDD frameworks draw on 'spec', like *RSpec*.) === Contexts and Before and After -The complete specification of the behaviour of a SUT might become long -and require various forms of setup. When using TDD style you -would probably break this up into multiple suites having their own -`setup()` and `teardown()`. 
+The complete specification of the behaviour of a SUT might become long and require various forms of setup. +When using TDD style you would probably break this up into multiple suites having their own `setup()` and `teardown()`. -With BDD-ish style we could consider a suite as a behaviour -specification for our SUT 'in a particular context'. E.g. +With BDD-ish style we could consider a suite as a behaviour specification for our SUT 'in a particular context'. +E.g. [source, c] ------------------------ @@ -2274,13 +2294,10 @@ } ------------------------ -The 'context' would then be `shopping_basket_for_returning_customer`, -with the SUT being the shopping basket 'class'. +The 'context' would then be `shopping_basket_for_returning_customer`, with the SUT being the shopping basket 'class'. -So 'context', 'system under test' and 'suite' are mostly -interchangable concepts in *Cgreen* lingo. It's a named group of -'tests' that share the same `BeforeEach` and `AfterEach` and lives in -the same source file. +So 'context', 'system under test' and 'suite' are mostly interchangable concepts in *Cgreen* lingo. +It's a named group of 'tests' that share the same `BeforeEach` and `AfterEach` and lives in the same source file. @@ -2290,83 +2307,65 @@ === Forgot to Add Your Test? -When we write a new test we focus on the details about the test we are -trying to write. And writing tests is no trivial matter so this might -well take a lot of brain power. - -So, it comes as no big surprise, that sometimes you write your test -and then forget to add it to the suite. When we run it it appears that it -passed on the first try! Although this *should* really make you -suspicious, sometimes you get so happy that you just continue with -churning out more tests and more code. It's not until some (possibly -looong) time later that you realize, after much headache and -debugging, that the test did not actually pass. It was never even run! 
- -There are practices to minimize the risk of this happening, such as -always running the test as soon as you can set up the test. This way -you will see it fail before trying to get it to pass. - -But it is still a practice, something we, as humans, might fail to do -at some point. Usually this happens when we are most stressed and in -need of certainty. +When we write a new test we focus on the details about the test we are trying to write. +And writing tests is no trivial matter so this might well take a lot of brain power. + +So, it comes as no big surprise, that sometimes you write your test and then forget to add it to the suite. +When we run it it appears that it passed on the first try! +Although this *should* really make you suspicious, sometimes you get so happy that you just continue with churning out more tests and more code. +It's not until some (possibly looong) time later that you realize, after much headache and debugging, that the test did not actually pass. +It was never even run! + +There are practices to minimize the risk of this happening, such as always running the test as soon as you can set up the test. +This way you will see it fail before trying to get it to pass. + +But it is still a practice, something we, as humans, might fail to do at some point. +Usually this happens when we are most stressed and in need of certainty. === The Solution - the 'cgreen-runner' -*Cgreen* gives you a tool to avoid not only the risk of this -happening, but also the extra work and extra code. It is called the -`cgreen-runner`. - -The `cgreen-runner` should come with your *Cgreen* installation if -your platform supports the technique that is required, which is -'programatic access to dynamic loading of libraries'. This means -that a program can load an external library of code into memory and -inspect it. Kind of self-inspection, or reflexion. 
- -So all you have to do is to build a dynamically loadable library of -all tests (and of course your objects under test and other necessary -code). Then you can run the `cgreen-runner` and point it to the -library. The runner will then load the library, enumerate all tests in -it, and run every test. +*Cgreen* gives you a tool to avoid not only the risk of this happening, but also the extra work and extra code. +It is called the `cgreen-runner`. + +The `cgreen-runner` should come with your *Cgreen* installation if your platform supports the technique that is required, which is 'programatic access to dynamic loading of libraries'. +This means that a program can load an external library of code into memory and inspect it. +Kind of self-inspection, or reflexion. + +So all you have to do is to build a dynamically loadable library of all tests (and of course your objects under test and other necessary code). +Then you can run the `cgreen-runner` and point it to the library. +The runner will then load the library, enumerate all tests in it, and run every test. It's automatic, and there is nothing to forget. +[[runner]] === Using the Runner -Assuming your tests are in `first_test.c` the typical command to -build your library using `gcc` would be +Assuming your tests are in `first_test.c` the typical command to build your library using `gcc` would be -------------------------- $ gcc -shared -o first_test.so -fPIC first_test.c -lcgreen -------------------------- -The `-fPIC` means to generate 'position independent code' which is -required if you want to load the library dynamically. To explicitly -state this is required on many platforms. - -How to build a dynamically loadable shared library might vary a lot -depending on your platform. Can't really help you there, sorry! 
- -As soon as we have linked it we can run the tests using the -`cgreen-runner` by just giving it the shared, dynamically loadable, -object library as an argument: +The `-fPIC` means to generate 'position independent code' which is required if you want to load the library dynamically. +To explicitly state this is required on many platforms. + +How to build a dynamically loadable shared library might vary a lot depending on your platform. +Can't really help you there, sorry! + +As soon as we have linked it we can run the tests using the `cgreen-runner` by just giving it the shared, dynamically loadable, object library as an argument: ------------------------- $ cgreen-runner first_test.so include::tutorial_src/runner1.out[] ------------------------- -More or less exactly the same output as when we ran our first test in -the beginning of this quickstart tutorial. We can see that the top -level of the tests will be named as the library it was discovered in, -and the second level is the context for our System Under Test, in -this case 'Cgreen'. We also see that the context is mentioned in the -failure message, giving a fairly obvious `Cgreen -> fails_this_test`. - -Now we can actually delete the main function in our source code. We -don't need all this, since the runner will discover all tests -automatically. +More or less exactly the same output as when we ran our first test in the beginning of this quickstart tutorial. We can see that the top level of the tests will be named as the library it was discovered in, and the second level is the context for our System Under Test, in this case 'Cgreen'. +We also see that the context is mentioned in the failure message, giving a fairly obvious `Cgreen -> fails_this_test`. + +Now we can actually delete the main function in our source code. +We don't need all this, since the runner will discover all tests automatically. 
[source,c] ------------------------ @@ -2382,12 +2381,10 @@ include::tutorial_src/runner2.out[] ------------------------- -We recommend the BDD notation to discover tests, and you indicate -which context the test we want to run is in. In this example it is -`Cgreen` so the test should be refered to as `Cgreen:this_test_should_fail`. +We recommend the BDD notation to discover tests, and you indicate which context the test we want to run is in. +In this example it is `Cgreen` so the test should be refered to as `Cgreen:this_test_should_fail`. -If you don't use the BDD notation there is actually a context anyway, -it is called `default`. +If you don't use the BDD notation there is actually a context anyway, it is called `default`. [[runner-options]] @@ -2406,12 +2403,10 @@ --colours:: Use colours (or colors) to emphasis result (requires ANSI-capable terminal) --quiet:: Be more quiet -The `verbose` option is particularly handy since it will give you the -actual names of all tests discovered. So if you have long test names -you can avoid mistyping them by copying and pasting from the output of -`cgreen-runner --verbose`. It will also give the mangled name of the -test which should make it easier to find in the debugger. Here's an -example: +The `verbose` option is particularly handy since it will give you the actual names of all tests discovered. +So if you have long test names you can avoid mistyping them by copying and pasting from the output of `cgreen-runner --verbose`. +It will also give the mangled name of the test which should make it easier to find in the debugger. +Here's an example: ------------------------ include::tutorial_src/runner3.out[] @@ -2420,19 +2415,15 @@ === Selecting Tests To Run -You can name a single test to be run by giving it as the last argument -on the command line. The name should be in the format -`:`. 
If not obvious you can get that name by using the -`--verbose` command option which will show you all tests discovered -and both there C/C++ and Cgreen names. Copying the Cgreen name from -that output is an easy way to run only that particular test. When a -single test is named it is run using `run_single_test()`. As described -in <> this means that it is __not__ protected by -`fork()`-ing it to run in its own process. - -The `cgreen-runner` supports selecting tests with limited pattern -matching. Using an asterisk as a simple 'match many' symbol you can -say things like +You can name a single test to be run by giving it as the last argument on the command line. +The name should be in the format `:`. +If not obvious you can get that name by using the `--verbose` command option which will show you all tests discovered and both their C/C++ and Cgreen names. +Copying the Cgreen name from that output is an easy way to run only that particular test. +When a single test is named it is run using `run_single_test()`. +As described in <> this means that it is __not__ protected by `fork()`-ing it to run in its own process. + +The `cgreen-runner` supports selecting tests with limited pattern matching. +Using an asterisk as a simple 'match many' symbol you can say things like -------------------- $ cgreen-runner Cgreen:* @@ -2442,8 +2433,7 @@ === Multiple Test Libraries -You can run tests in multiple libraries in one go by adding them -to the `cgreen-runner` command: +You can run tests in multiple libraries in one go by adding them to the `cgreen-runner` command: ----------------------- $ cgreen-runner first_set.so second_set.so ... @@ -2452,54 +2442,43 @@ === Setup, Teardown and Custom Reporters -The `cgreen-runner` will only run setup and teardown functions if you -use the BDD-ish style with `BeforeEach()` and `AfterEach()` as -described above. The runner does not pickup `setup()` and `teardown()` -added to suites, because it actually doesn't run suites. 
It discovers -all tests and runs them one by one. The macros required by the BDD-ish -style ensures that the corresponding `BeforeEach()` and `AfterEach()` -are run before and after each test. - -CAUTION: The `cgreen-runner` __will__ discover your tests in a shared -library even if you don't use the BDD-ish style. But it will not be -able to find and run the `setup()` and/or `teardown()` attached to your -suite(s). This will probably cause your tests to fail or crash. - -In case you have non-BDD style tests __without__ any `setup()` and/or -`teardown()` you can still use the runner. The default suite/context -where the tests live in this case is called `default`. But why don't -you convert your tests to BDD notation? This removes the risk of -frustrating trouble-shooting when you added `setup()` -and `teardown()` and can't understand why they are not run... - -So, the runner encourages you to use the BDD notation. But since we -recommend that you do anyway, that's no extra problem if you are -starting out from scratch. But see <> for some easy -tips on how to get you there if you already have non-BDD -tests. - -You can choose between the TextReporter, which we have been seeing so -far, and the built-in JUnit/Ant compatible XML-reporter using the -`--xml` option. But it is not currently possible to use custom -reporters as outlined in <> with the runner. +The `cgreen-runner` will only run setup and teardown functions if you use the BDD-ish style with `BeforeEach()` and `AfterEach()` as described above. +The runner does not pickup `setup()` and `teardown()` added to suites, because it actually doesn't run suites. +It discovers all tests and runs them one by one. +The macros required by the BDD-ish style ensures that the corresponding `BeforeEach()` and `AfterEach()` are run before and after each test. + +CAUTION: The `cgreen-runner` __will__ discover your tests in a shared library even if you don't use the BDD-ish style. 
+But it will not be able to find and run the `setup()` and/or `teardown()` attached to your suite(s). +This will probably cause your tests to fail or crash. + +In case you have non-BDD style tests __without__ any `setup()` and/or `teardown()` you can still use the runner. +The default suite/context where the tests live in this case is called `default`. +But why don't you convert your tests to BDD notation? +This removes the risk of frustrating trouble-shooting when you added `setup()` and `teardown()` and can't understand why they are not run... + +So, the runner encourages you to use the BDD notation. +But since we recommend that you do anyway, that's no extra problem if you are starting out from scratch. +But see <> for some easy tips on how to get you there if you already have non-BDD tests. -If you require another custom reporter you need to resort to the -standard, programatic, way of invoking your tests. For now... +You can choose between the TextReporter, which we have been seeing so far, and the built-in JUnit/Ant compatible XML-reporter using the `--xml` option. +But it is not currently possible to use custom reporters as outlined in <> with the runner. + +If you require another custom reporter you need to resort to the standard, programatic, way of invoking your tests. +For now... [[xensure]] === Skipping Tests -Sometimes you find that you need to temporarily remove a test, perhaps -to do a refactoring when you have a failing test. Ignoring that test will -allow you to do the refactoring while still in the green. - -An old practice is then to comment it out. That is a slightly cumbersome. -It is also hazardous habit as there is no indication of a missing test if you -forget to uncomment it when you are done. +Sometimes you find that you need to temporarily remove a test, perhaps to do a refactoring when you have a failing test. +Ignoring that test will allow you to do the refactoring while still in the green. + +An old practice is then to comment it out. 
+That is slightly cumbersome. +It is also a hazardous habit as there is no indication of a missing test if you forget to uncomment it when you are done. -*Cgreen* offers a much better solution. You can just add an 'x' infront -of the `Ensure` for the test and that test will be skipped. +*Cgreen* offers a much better solution. +You can just add an 'x' in front of the `Ensure` for the test and that test will be skipped. [source, C] ---------------------------- @@ -2510,25 +2489,22 @@ ... ---------------------------- -With this method, it is a one character change to temporarily ignore, -and un-ignore, a test. It is also easily found using text searches through -a complete source tree. *Cgreen* will also tally the skipped tests, so -it is clearly visible that you have some skipped test when you run them. +With this method, it is a one character change to temporarily ignore, and un-ignore, a test. +It is also easily found using text searches through a complete source tree. +*Cgreen* will also tally the skipped tests, so it is clearly visible that you have some skipped test when you run them. [[changing_style]] == Changing Style -If you already have some TDD style *Cgreen* test suites, it is quite -easy to change them over to BDD-ish style. Here are the steps required +If you already have some TDD style *Cgreen* test suites, it is quite easy to change them over to BDD-ish style. +Here are the steps required * Add `Describe(SUT);` -* Turn your current setup function into a `BeforeEach()` definition by -changing its signature to match the macro, or simply call the existing -setup function from the BeforeEach(). If you don't have any setup function -you still need to define an empty `BeforeEach()`. +* Turn your current setup function into a `BeforeEach()` definition by changing its signature to match the macro, or simply call the existing setup function from the BeforeEach(). +If you don't have any setup function you still need to define an empty `BeforeEach()`. 
* Ditto for `AfterEach()`. @@ -2545,10 +2521,9 @@ you can do that by keeping the setup and teardown functions and their corresponding `set_`-calls. -It's nice that this is a simple process, because you can change over -from TDD style to BDD-ish style in small steps. You can convert one source -file at a time, by just following the recipe above. Everything will -still work as before but your tests and code will likely improve. +It's nice that this is a simple process, because you can change over from TDD style to BDD-ish style in small steps. +You can convert one source file at a time, by just following the recipe above. +Everything will still work as before but your tests and code will likely improve. And once you have changed style you can fully benefit from the automatic discovery of tests as described in <>. @@ -2568,18 +2543,17 @@ return run_test_suite(our_tests(), create_text_reporter()); ----------------------- -We can change the reporting mechanism just by changing this -call to create another reporter. +We can change the reporting mechanism just by changing this call to create another reporter. [[builtin_reporters]] === Built-in Reporters -*Cgreen* has the following built-in reporters that you can choose from -when your code runs the test suite. +*Cgreen* has the following built-in reporters that you can choose from when your code runs the test suite. [options="header", cols=4] +[options="header", cols=4] |==================================================================================== | Reporter | Purpose | Signature | Note @@ -2593,29 +2567,22 @@ | `create_cdash_reporter(CDashInfo *info)` | `info` is a structure defined in `cdash_reporter.h` |==================================================================================== -If you write a runner function like in most examples above, you can -just substitute which runner to create. 
If you use the `cgreen-runner` -(<>) to dynamically find all your tests you can force -it to use the XML-reporter with the `-x ` option. +If you write a runner function like in most examples above, you can just substitute which runner to create. +If you use the `cgreen-runner` (<>) to dynamically find all your tests you can force it to use the XML-reporter with the `-x ` option. -NOTE: Currently `cgreen-runner` only supports the test and XML -built-in reporters. +NOTE: Currently `cgreen-runner` only supports the built-in text and XML reporters. === Rolling Our Own -Althoug *Cgreen* has a number of options, there are times when you'd like -a different output from the reporter, the CUTE and CDash reporters are -examples that grew out of such a need. +Although *Cgreen* has a number of options, there are times when you'd like a different output from the reporter, the CUTE and CDash reporters are examples that grew out of such a need. -Perhaps your Continuous Integration server want the result in a different -format, or you just don't like the text reporter... +Perhaps your Continuous Integration server want the result in a different format, or you just don't like the text reporter... -Writing your own reporter is supported. And we'll go through how that can -be done using an XML-reporter as an example. +Writing your own reporter is supported. +And we'll go through how that can be done using an XML-reporter as an example. -NOTE: *Cgreen* already has an XML-reporter compatible with ANT/Jenkins, see -<>. +NOTE: *Cgreen* already has an XML-reporter compatible with ANT/Jenkins, see <>. Here is the code for `create_text_reporter()`... @@ -2637,22 +2604,15 @@ } ----------------------- -The `TestReporter` structure contains function pointers that control -the reporting. When called from `create_reporter()` constructor, -these pointers are set up with functions that display nothing. 
The -text reporter code replaces these with something more dramatic, and -then returns a pointer to this new object. Thus the -`create_text_reporter()` function effectively extends the object from -`create_reporter()`. - -The text reporter only outputs content at the start of the first test, -at the end of the test run to display the results, when a failure -occurs, and when a test fails to complete. A quick look at the -`text_reporter.c` file in *Cgreen* reveals that the overrides just -output a message and chain to the versions in `reporter.h`. +The `TestReporter` structure contains function pointers that control the reporting. +When called from `create_reporter()` constructor, these pointers are set up with functions that display nothing. +The text reporter code replaces these with something more dramatic, and then returns a pointer to this new object. +Thus the `create_text_reporter()` function effectively extends the object from `create_reporter()`. + +The text reporter only outputs content at the start of the first test, at the end of the test run to display the results, when a failure occurs, and when a test fails to complete. +A quick look at the `text_reporter.c` file in *Cgreen* reveals that the overrides just output a message and chain to the versions in `reporter.h`. -To change the reporting mechanism ourselves, we just have to know a little -about the methods in the `TestReporter` structure. +To change the reporting mechanism ourselves, we just have to know a little about the methods in the `TestReporter` structure. === The TestReporter Structure @@ -2693,42 +2653,30 @@ `void (*destroy)(TestReporter *reporter)`:: -This is the destructor for the default structure. If this is -overridden, then the overriding function must call -`destroy_reporter(TestReporter *reporter)` to finish the clean up. +This is the destructor for the default structure. 
+If this is overridden, then the overriding function must call `destroy_reporter(TestReporter *reporter)` to finish the clean up. `void (*start_suite)(TestReporter *reporter, const char *name, const int count)`:: -This is the first of the callbacks. At the start of -each test suite *Cgreen* will call this method on the reporter with -the name of the suite being entered and the number of tests in that -suite. The default version keeps track of the stack of tests in the -`breadcrumb` pointer of `TestReporter`. If you make use of the -breadcrumb functions, as the defaults do, then you will need to call -`reporter_start_suite()` to keep the book-keeping in sync. +This is the first of the callbacks. +At the start of each test suite *Cgreen* will call this method on the reporter with the name of the suite being entered and the number of tests in that suite. +The default version keeps track of the stack of tests in the `breadcrumb` pointer of `TestReporter`. +If you make use of the breadcrumb functions, as the defaults do, then you will need to call `reporter_start_suite()` to keep the book-keeping in sync. `void (*start_test)(TestReporter *reporter, const char *name)`:: -At the start of each test *Cgreen* will call this method on the -reporter with the name of the test being entered. Again, the default -version keeps track of the stack of tests in the `breadcrumb` pointer -of `TestReporter`. If you make use of the breadcrumb functions, as the -defaults do, then you will need to call `reporter_start_test()` to keep the -book-keeping in sync. +At the start of each test *Cgreen* will call this method on the reporter with the name of the test being entered. +Again, the default version keeps track of the stack of tests in the `breadcrumb` pointer of `TestReporter`. +If you make use of the breadcrumb functions, as the defaults do, then you will need to call `reporter_start_test()` to keep the book-keeping in sync. 
`void (*show_pass)(TestReporter *reporter, const char *file, int line, const char *message, va_list arguments)`:: -This method is initially empty as most reporters see little point in -reporting passing tests (but you might do), so there is no need to -chain the call to any other function. Besides the pointer to the -reporter structure, *Cgreen* also passes the file name of the test, -the line number of failed assertion, the message to show and any -additional parameters to substitute into the message. The message -comes in as `printf()` style format string, and so the variable -argument list should match the substitutions. +This method is initially empty as most reporters see little point in reporting passing tests (but you might do), so there is no need to chain the call to any other function. +Besides the pointer to the reporter structure, *Cgreen* also passes the file name of the test, the line number of failed assertion, the message to show and any additional parameters to substitute into the message. +The message comes in as `printf()` style format string, and so the variable argument list should match the substitutions. `void (*show_fail)(TestReporter *reporter, const char *file, int line, const char *message, va_list arguments)`:: @@ -2743,46 +2691,37 @@ `void (*show_incomplete)(TestReporter *reporter, const char *file, int line, const char *message, va_list arguments)`:: -When a test fails to complete, this is the handler that is called. As -it's an unexpected outcome, no message is received, but we do get the -name of the test. The text reporter combines this with the breadcrumb -to produce the exception report. +When a test fails to complete, this is the handler that is called. +As it's an unexpected outcome, no message is received, but we do get the name of the test. +The text reporter combines this with the breadcrumb to produce the exception report. 
`void (*assert_true)(TestReporter *reporter, const char *file, int line, int result, const char * message, ...)`:: -This is not normally overridden and is really internal. It is the raw -entry point for the test messages from the test suite. By default it -dispatches the call to either `show_pass()` or `show_fail()`. +This is not normally overridden and is really internal. +It is the raw entry point for the test messages from the test suite. +By default it dispatches the call to either `show_pass()` or `show_fail()`. `void (*finish_test)(TestReporter *reporter, const char *file, int line)`:: -The counterpart to the `(*start_test)()` call. It is called on leaving -the test. It needs to be chained to the `reporter_finish()` to keep -track of the breadcrumb book keeping. +The counterpart to the `(*start_test)()` call. +It is called on leaving the test. +It needs to be chained to the `reporter_finish()` to keep track of the breadcrumb book keeping. `void (*finish_suite)(TestReporter *reporter, const char *file, int line)`:: -The counterpart to the `(*start_suite)()` call called on leaving the -test suite, and similar to the `(*finish_test)()` if your reporter -needs a handle on that event too. The default text reporter chains -both this and `(*finish_test)()` to the same function where it figures -out if it is the end of the top level suite. If so, it prints the -familiar summary of passes and fails. - -NOTE: The `show_fail()` and `show_pass()` functions are called from -the child process, i.e. the isolated process that is `fork()`:ed to -run a single test case. All others, notably `start_...()`, -`finish_...()`, `show_incomplete()` and `show_skip()` are run in the -main (parent) process. This fact might be important since the -processes do not share memory. Information is passed from the child to -the parent using messaging performed within the `show_...()` -functions. 
+The counterpart to the `(*start_suite)()` call called on leaving the test suite, and similar to the `(*finish_test)()` if your reporter needs a handle on that event too. +The default text reporter chains both this and `(*finish_test)()` to the same function where it figures out if it is the end of the top level suite. +If so, it prints the familiar summary of passes and fails. + +NOTE: The `show_fail()` and `show_pass()` functions are called from the child process, i.e. the isolated process that is `fork()`:ed to run a single test case. +All others, notably `start_...()`, `finish_...()`, `show_incomplete()` and `show_skip()` are run in the main (parent) process. +This fact might be important since the processes do not share memory. +Information is passed from the child to the parent using messaging performed within the `show_...()` functions. -The second block is simply resources and book keeping that the reporter -can use to liven up the messages... +The second block is simply resources and book keeping that the reporter can use to liven up the messages... [horizontal] `passes`:: The number of passes so far. @@ -2792,20 +2731,16 @@ `breadcrumb`:: This is a pointer to the list of test names in the stack. The `breadcrumb` pointer is different and needs a little explanation. -Basically it is a stack, analogous to the breadcrumb trail you see on -websites. Everytime a `start()` handler is invoked, the name is -placed in this stack. When a `finish()` message handler is invoked, a -name is popped off. - -There are a bunch of utility functions in `cgreen/breadcrumb.h` that -can read the state of this stack. Most useful are -`get_current_from_breadcrumb()` which takes the breadcrumb pointer and -returns the current test name, and `get_breadcrumb_depth()` which gives -the current depth of the stack. A depth of zero means that the test -run has finished. +Basically it is a stack, analogous to the breadcrumb trail you see on websites. 
+Every time a `start()` handler is invoked, the name is placed in this stack. +When a `finish()` message handler is invoked, a name is popped off. + +There are a bunch of utility functions in `cgreen/breadcrumb.h` that can read the state of this stack. +Most useful are `get_current_from_breadcrumb()` which takes the breadcrumb pointer and returns the current test name, and `get_breadcrumb_depth()` which gives the current depth of the stack. +A depth of zero means that the test run has finished. -If you need to traverse all the names in the breadcrumb, then you can -call `walk_breadcrumb()`. Here is the full signature... +If you need to traverse all the names in the breadcrumb, then you can call `walk_breadcrumb()`. +Here is the full signature... [source,c] ----------------------- @@ -2815,33 +2750,28 @@ The `void (*walker)(const char *, void *)` is a callback that will be passed the name of the test suite for each level of nesting. -It is also passed the `memo` pointer that was passed to the -`walk_breadcrumb()` call. You can use this pointer for anything you -want, as all *Cgreen* does is pass it from call to call. This is so -aggregate information can be kept track of whilst still being -reentrant. +It is also passed the `memo` pointer that was passed to the `walk_breadcrumb()` call. +You can use this pointer for anything you want, as all *Cgreen* does is pass it from call to call. +This is so aggregate information can be kept track of whilst still being reentrant. The last parts of the `TestReporter` structure are... [horizontal] -`ipc`:: This is an internal structure for handling the messaging between reporter -and test suite. You shouldn't touch this. +`ipc`:: This is an internal structure for handling the messaging between reporter and test suite. +You shouldn't touch this. `memo`:: By contrast, this is a spare pointer for your own expansion. -`options`:: A pointer to a reporter specific structure that can be -used to set options. E.g. 
the textreporter defines the structure -`TextReporterOptions` which can be used by calling code to define the -use of colors when printing passes and failures. You set it with -`set_reporter_options(*void)`. +`options`:: A pointer to a reporter specific structure that can be used to set options. +E.g. the textreporter defines the structure `TextReporterOptions` which can be used by calling code to define the use of colors when printing passes and failures. +You set it with `set_reporter_options(*void)`. === An Example XML Reporter -Let's make things real with an example. Suppose we want to send the -output from *Cgreen* in XML format, say for storing in a repository or -for sending across the network. +Let's make things real with an example. +Suppose we want to send the output from *Cgreen* in XML format, say for storing in a repository or for sending across the network. NOTE: The `cgreen-runner` already has an XML-reporter that you can use if you need to produce Jenkins/ANT compatible XML output. @@ -2877,14 +2807,11 @@ include::tutorial_src/test_as_xml0.c[] ----------------------- -We can't use the auto-discovering `cgreen-runner` (see -<>) here since we need to ensure that the nested -suites are reported as a nested xml structure. And we're not actually -writing real tests, just something that we can use to drive our new -reporter. +We can't use the auto-discovering `cgreen-runner` (see <>) here since we need to ensure that the nested suites are reported as a nested xml structure. +And we're not actually writing real tests, just something that we can use to drive our new reporter. -The text reporter is used just to confirm that everything is -working. So far it is. +The text reporter is used just to confirm that everything is working. +So far it is. 
----------------------- include::tutorial_src/test_as_xml0.out[] @@ -2934,9 +2861,7 @@ include::tutorial_src/xml_reporter1.c[] ----------------------- -Although chaining to the underlying `reporter_start_*()` -and `reporter_finish_*()` functions is optional, I want to -make use of some of the facilities later. +Although chaining to the underlying `reporter_start_*()` and `reporter_finish_*()` functions is optional, I want to make use of some of the facilities later. Our output meanwhile, is making its first tentative steps... @@ -2956,9 +2881,8 @@ include::tutorial_src/xml_reporter2.c[lines=37..-1] ----------------------- -We have to use `vprintf()` to handle the variable argument list passed -to us. This will probably mean including the `stdarg.h` header as -well as `stdio.h`. +We have to use `vprintf()` to handle the variable argument list passed to us. +This will probably mean including the `stdarg.h` header as well as `stdio.h`. This gets us pretty close to what we want... @@ -2978,27 +2902,22 @@ include::tutorial_src/xml_reporter3.c[lines=44..-1] ----------------------- -All that's left then is the XML declaration and the thorny issue of -indenting. Although the indenting is not strictly necessary, it would -make the output a lot more readable. - -Given that the test depth is kept track of for us with the -`breadcrumb` object in the `TestReporter` structure, indentation will -actually be quite simple. We'll add an `indent()` function that -outputs the correct number of tabs... +All that's left then is the XML declaration and the thorny issue of indenting. +Although the indenting is not strictly necessary, it would make the output a lot more readable. + +Given that the test depth is kept track of for us with the `breadcrumb` object in the `TestReporter` structure, indentation will actually be quite simple. +We'll add an `indent()` function that outputs the correct number of tabs... 
[source,c] ----------------------- include::tutorial_src/xml_reporter4.c[lines=7..12] ----------------------- -The `get_breadcrumb_depth()` function just gives the current test -depth as recorded in the reporters breadcrumb (from -`cgreen/breadcrumb.h`). As that is just the number of tabs to output, -the implementation is trivial. +The `get_breadcrumb_depth()` function just gives the current test depth as recorded in the reporters breadcrumb (from `cgreen/breadcrumb.h`). +As that is just the number of tabs to output, the implementation is trivial. -We can then use this function in the rest of the code. Here is the -complete listing... +We can then use this function in the rest of the code. +Here is the complete listing... [source,c] ----------------------- @@ -3018,19 +2937,17 @@ `syslog`, talk to IDE plug-ins, paint pretty printed documents or just return a boolean for monitoring purposes. - == Advanced Usage === Custom Constraints -Sometimes the built-in constraints that *Cgreen* provide are not -sufficient. With *Cgreen* it is possible to create custom constraints, -although you will be depending on some internal structures if you do -so. - -Here's how to implement a simple example custom constraint that -asserts that the value is bigger than 5. We'll implement this using a -static constraint since it does not take any parameter. +Sometimes the built-in constraints that *Cgreen* provide are not sufficient. +With *Cgreen* it is possible to create custom constraints, although you will be depending on some internal structures if you do so. + +Here's how to implement a simple example custom constraint that asserts that the value is bigger than 5. +We'll implement this using a static constraint since it does not take any parameter. + +WARNING: static constraints are a bad idea... 
First we need the actual compare function: @@ -3051,21 +2968,16 @@ include::tutorial_src/custom_constraint1.c[lines=13..26] ----------------------------- -This implementation can use a statically declared `Constraint` -structure that is prefilled since it does not need to store the value -to be checked. This static custom constraint can then be used directly -in the `assert` like this: +This implementation can use a statically declared `Constraint` structure that is prefilled since it does not need to store the value to be checked. +This static custom constraint can then be used directly in the `assert` like this: [source,c] ----------------------------- include::tutorial_src/custom_constraint1.c[lines=28..31] ----------------------------- -To create a custom constraint that takes an input parameter, we need -to add a function that creates a constraint structure that correctly -saves the value to be checked, and, for convenience, a macro. This -time we need to dig into how *Cgreen* stores expected values and we'll -also make use of *Cgreen*'s utility function `string_dup()`. +To create a custom constraint that takes an input parameter, we need to add a function that creates a constraint structure that correctly saves the value to be checked, and, for convenience, a macro. +This time we need to dig into how *Cgreen* stores expected values and we'll also make use of *Cgreen*'s utility function `string_dup()`. [source,c] ----------------------------- @@ -3082,11 +2994,8 @@ include::tutorial_src/custom_constraint2.c[lines=31..33] ----------------------------- -The last, and definitely more complex, example is a constraint that -takes two structures and compares fields in them. The constraint will, -given a structure representing a piece and another structure -representing a box, check if the piece can fit inside the box using a -size field. +The last, and definitely more complex, example is a constraint that takes two structures and compares fields in them. 
+The constraint will, given a structure representing a piece and another structure representing a box, check if the piece can fit inside the box using a size field. Assuming two "application" structures with `size` fields: @@ -3110,10 +3019,8 @@ include::tutorial_src/custom_constraint3.c[lines=19..22] ----------------------------- -And this time we can't rely on *Cgreen*'s checker and message -generating function `test_want()` which we used in the previous -examples. So we also need a custom function that calls the comparison -and formats a possible error message: +And this time we can't rely on *Cgreen*'s checker and message generating function `test_want()` which we used in the previous examples. +So we also need a custom function that calls the comparison and formats a possible error message: [source,c] ----------------------------- include::tutorial_src/custom_constraint3.c[lines=38..52] ----------------------------- -CAUTION: As stated above, using custom constraints makes your tests -vulnurable to changes in *Cgreen*'s internals. Hopefully a method to -avoid this will emerge in the future. - -TIP: You can write custom constraints directly in a test file, but -they can of course also be collected into a separately compiled module -which is linked with your tests. +CAUTION: As stated above, using custom constraints makes your tests vulnerable to changes in *Cgreen*'s internals. +Hopefully a method to avoid this will emerge in the future. + +TIP: You can write custom constraints directly in a test file, but they can of course also be collected into a separately compiled module which is linked with your tests. == Hints and Tips -CAUTION: This chapter is intended to contain tips for situations that -you might need some help with, but it is nowhere near complete at this -time. +CAUTION: This chapter is intended to contain tips for situations that you might need some help with, but it is nowhere near complete at this time. 
[[cgreen-mocker]] === `cgreen-mocker` - Automated Mocking -Are you starting out with *Cgreen* on a largish legacy system? And there -are loads and loads of functions to mock to get a unit under test? +Are you starting out with *Cgreen* on a largish legacy system? +And there are loads and loads of functions to mock to get a unit under test? -You could try the `cgreen-mocker` that is supplied as a contributed -part of the *Cgreen* source distribution. +You could try the `cgreen-mocker` that is supplied as a contributed part of the *Cgreen* source distribution. -It is a Python program that parses C language header files and tries -to create a corresponding `.mock` file where each function declaration -is replaced with a call to `mock()`. +It is a Python program that parses C language header files and tries to create a corresponding `.mock` file where each function declaration is replaced with a call to `mock()`. ----------------- Usage: @@ -3170,8 +3069,8 @@ [source,c] ---------------------------- -extern Value make_integer_value(int integer); -extern Value make_string_value(const char *string); +extern CgreenValue make_cgreen_integer_value(intptr_t integer); +extern CgreenValue make_cgreen_string_value(const char *string); ---------------------------- `cgreen-mocker` will, given that there are no errors, print something @@ -3179,23 +3078,27 @@ [source,c] ---------------------------- -Value make_integer_value(int integer) { +CgreenValue make_cgreen_integer_value(intptr_t integer) { return mock(integer); } -Value make_string_value(const char *string) { +CgreenValue make_cgreen_string_value(const char *string) { return mock(string); } ---------------------------- Of course, you would pipe this output to a file. -To use `cgreen-mocker` you need Python and `pycparser`. 
The latter -can be found at [https://github.com/eliben/pycparser] or can easily -be installed with +To use `cgreen-mocker` you need Python, and the following packages: + +* `packaging` -- (https://github.com/pypa/packaging) + +* `pycparser` -- (https://github.com/eliben/pycparser) + +These can easily be installed with: ---------------- -$ pip install pycparser +$ pip install -r requirements.txt ---------------- NOTE: `cgreen-mocker` is an unsupported contribution to the *Cgreen* @@ -3204,12 +3107,10 @@ === Compiler Error Messages -Sometimes you might get cryptic and strange error messages from the -compiler. Since *Cgreen* uses some C/C++ macro magic this can happen -and the error messages might not be straight forward to interpret. +Sometimes you might get cryptic and strange error messages from the compiler. +Since *Cgreen* uses some C/C++ macro magic this can happen and the error messages might not be straight forward to interpret. -Here are some examples, but the exact messages differ between compilers -and versions. +Here are some examples, but the exact messages differ between compilers and versions. |========================================================= |*Compiler error message* |*Probable cause...* @@ -3222,13 +3123,11 @@ === Signed, Unsigned, Hex and Byte -*Cgreen* attempts to handle primitive type comparisons with a single -constraint, `is_equal_to()`. This means that it must store the actual -and expected values in a form that will accomodate all possible values -that primitive types might take, typically an `intptr_t`. +*Cgreen* attempts to handle primitive type comparisons with a single constraint, `is_equal_to()`. +This means that it must store the actual and expected values in a form that will accommodate all possible values that primitive types might take, typically an `intptr_t`. -This might sometimes cause unexpected comparisons since all actual -values will be cast to match `intptr_t`, which is a signed value. E.g. 
+This might sometimes cause unexpected comparisons since all actual values will be cast to match `intptr_t`, which is a signed value. +E.g. [source, c] ------------------------------ @@ -3248,20 +3147,14 @@ expected value: [170] ------------------------------ -This is caused by the C rules forcing an implicit cast of the `signed -char` to `intptr_t` by sign-extension. This might not be what you -expected. The correct solution, by any standard, is to cast the actual -value to `unsigned char` which will then be interpreted correctly. And -the test passes. - -NOTE: Casting to `unsigned` will not always suffice since that is -interpreted as `unsigned int` which will cause a sign-extension from -the `signed char` and might or might not work depending on the size of -`int` on your machine. - -In order to reveal what really happens you might want to see the actual and -expected values in hex. This can easily be done with the -`is_equal_to_hex()`. +This is caused by the C rules forcing an implicit cast of the `signed char` to `intptr_t` by sign-extension. +This might not be what you expected. +The correct solution, by any standard, is to cast the actual value to `unsigned char` which will then be interpreted correctly. +And the test passes. + +NOTE: Casting to `unsigned` will not always suffice since that is interpreted as `unsigned int` which will cause a sign-extension from the `signed char` and might or might not work depending on the size of `int` on your machine. + +In order to reveal what really happens you might want to see the actual and expected values in hex. This can easily be done with the `is_equal_to_hex()`. [source, c] ------------------------------ @@ -3283,9 +3176,8 @@ === Cgreen and Coverage -*Cgreen* is compatible with coverage tools, in particular -`gcov`/`lcov`. So generating coverage data for your application should -be straight forward. +*Cgreen* is compatible with coverage tools, in particular `gcov`/`lcov`. 
+So generating coverage data for your application should be straight forward. This is what you need to do (using `gcc` or `clang`): @@ -3299,34 +3191,131 @@ === Garbled Output -If the output from your *Cgreen* based tests appear garbled or -duplicated, this can be caused by the way *Cgreen* terminates its -test-running child process. In many unix-like environments the -termination of a child process should be done with `_exit()`. However, -this interfers severily with the ability to collect coverage data. As -this is important for many of us, *Cgreen* instead terminates its -child process with the much cruder `exit()` (note: no underscore). - -Under rare circumstances this might have the unwanted effect of output -becoming garbled and/or duplicated. - -If this happens you can change that behaviour using an environment -variable `CGREEN_CHILD_EXIT_WITH__EXIT` (note: two underscores). If -set, *Cgreen* will terminate its test-running child process with the -more POSIX-compliant `_exit()`. But as mentioned before, this is, at -least at this point in time, incompatible with collecting coverage -data. +If the output from your *Cgreen* based tests appear garbled or duplicated, this can be caused by the way *Cgreen* terminates its test-running child process. +In many unix-like environments the termination of a child process should be done with `_exit()`. +However, this interfers severily with the ability to collect coverage data. +As this is important for many of us, *Cgreen* instead terminates its child process with the much cruder `exit()` (note: no underscore). + +Under rare circumstances this might have the unwanted effect of output becoming garbled and/or duplicated. + +If this happens you can change that behaviour using an environment variable `CGREEN_CHILD_EXIT_WITH__EXIT` (note: two underscores). +If set, *Cgreen* will terminate its test-running child process with the more POSIX-compliant `_exit()`. 
+But as mentioned before, this is, at least at this point in time, incompatible with collecting coverage data. + +So, it's coverage __or__ POSIX-correct child exits and guaranteed output consistency. +You can't have both... + +[appendix] +== Legacy Style Assertions + +Cgreen have been around for a while, developed and matured. +There is an older style of assertions that was the initial version, a style that we now call the 'legacy style', because it was more aligned with the original, now older, unit test frameworks. +If you are not interested in historical artifacts, I recommend that you skip this section. + +But for completeness of documentation, here are the legacy style assertion macros: + +|========================================================= +|*Assertion* |*Description* +| `assert_true(boolean)` | Passes if boolean evaluates true +| `assert_false(boolean)` | Fails if boolean evaluates true +| `assert_equal(first, second)` | Passes if 'first == second' +| `assert_not_equal(first, second)` | Passes if 'first != second' +| `assert_string_equal(char *, char *)` | Uses 'strcmp()' and passes if + the strings are equal +| `assert_string_not_equal(char *, char *)` | Uses 'strcmp()' and fails + if the strings are equal +|========================================================= + +Each assertion has a default message comparing the two values. +If you want to substitute your own failure messages, then you must use the `*_with_message()` counterparts... 
+ +|========================================================= +|*Assertion* +| `assert_true_with_message(boolean, message, ...)` +| `assert_false_with_message(boolean, message, ...)` +| `assert_equal_with_message(tried, expected, message, ...)` +| `assert_not_equal_with_message(tried, unexpected, message, ...)` +| `assert_string_equal_with_message(char *, char *, message, ...)` +| `assert_string_not_equal_with_message(char *, char *, message, ...)` +|========================================================= + +All these assertions have an additional `char *` message parameter, which is the message you wished to display on failure. +If this is set to `NULL`, then the default message is shown instead. +The most useful assertion from this group is `assert_true_with_message()` as you can use that to create your own assertion functions with your own messages. + +Actually the assertion macros have variable argument lists. +The failure message acts like the template in `printf()`. +We could change the test above to be... + +[source,c] +----------------------------- +include::tutorial_src/strlen_tests4.c[lines=4..8] +----------------------------- + +This should produce a slightly more user friendly message when things go wrong. +But, actually, Cgreens default messages are so good that you are encouraged to skip the legacy style and go for the more modern constraints style assertions. +This is particularly true when you use the BDD style test notation. + +IMPORTANT: We strongly recommend the use of BDD Style notation with constraints based assertions. + + +[appendix] +== Release History + +In this section only the introduction or changes of major features are listed, and thus only MINOR versions. +For a detailed log of features, enhancements and bug fixes visit the projects repository on GitHub, https://github.com/cgreen-devs/cgreen. 
+ +Since 1.4.1 Cgreen has included the following C pre-processer definition variables + +- `CGREEN_VERSION`, a SemVer string +- `CGREEN_VERSION_MAJOR` +- `CGREEN_VERSION_MINOR` +- `CGREEN_VERSION_PATCH` + +You can use them to conditionally check for Cgreen features introduced as declared in the following sections. + +Since 1.2.0 Cgreen has featured a public version variable in the loaded library, `cgreen_library_version`. +This is mainly used by the `cgreen-runner` to present version of the loaded library, but it can also be used to check for availability of features in the same way. + +=== 1.6.0 + +- Reverted use of `libbfd` introduced in 1.5.0 due to portability issues and Debian deeming it to be a serious bug due to `libbfd` not having a stable interface + +=== 1.5.1 + +- Fixed a problem with `ends_with_string()` which randomly crashed + +=== 1.5.0 + +- Replaced calling `nm` with BFD library calls, this makes the `cgreen-runner` a bit more fiddly to build on some systems +- Introduced `will_capture_parameter()` + +=== 1.4.0 + +- A memory leak in `will_return_by_value()` was fixed but now requires user deallocation. + +=== 1.3.0 + +- Renamed CgreenValueType values to avoid clash, now all start with `CGREEN_` + +=== 1.2.0 + +- Introduced `will_return_by_value()` +- Introduced `with_side_effect()` + +=== 1.1.0 -So, it's coverage __or__ POSIX-correct child exits and guaranteed -output consistency. You can't have both... +None. +=== 1.0.0 +First official non-beta release. 
[appendix] == License -Copyright (c) 2006-2018, Cgreen Development Team and contributors + +Copyright (c) 2006-2021, Cgreen Development Team and contributors + (https://github.com/cgreen-devs/cgreen/graphs/contributors) Permission to use, copy, modify, and/or distribute this software and @@ -3348,14 +3337,9 @@ Thanks to -- Marcus Baker - initiator and substantial -inital work -- Matt Hargett - upgrading to the modern -BDD-ish syntax -- João Freitas - asciidoc documentation and Cmake -build system -- Thomas Nilefalk - cgreen-runner and current -maintainer +- Marcus Baker - initiator and substantial inital work +- Matt Hargett - upgrading to the modern BDD-ish syntax +- João Freitas - asciidoc documentation and Cmake build system +- Thomas Nilefalk - cgreen-runner and current maintainer -Thanks also go to @gardenia, @d-meiser, @stevemadsenblippar and others -for their contributions. +Thanks also go to @gardenia, @d-meiser, @stevemadsenblippar and others for their contributions. diff -Nru cgreen-1.3.0/doc/cheat-sheet.md cgreen-1.6.3/doc/cheat-sheet.md --- cgreen-1.3.0/doc/cheat-sheet.md 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/cheat-sheet.md 2023-10-03 15:16:52.000000000 +0000 @@ -45,7 +45,7 @@ ends_with_string( ) does_not_end_with_string( ) -### Doubles +### Floating point values (Doubles) is_equal_to_double( ) is_not_equal_to_double( ) diff -Nru cgreen-1.3.0/doc/CMakeLists.txt cgreen-1.6.3/doc/CMakeLists.txt --- cgreen-1.3.0/doc/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -52,5 +52,5 @@ IF(UNIX) set(MANPAGES man) - INSTALL(DIRECTORY ${MANPAGES} DESTINATION share/) + INSTALL(DIRECTORY ${MANPAGES} DESTINATION share) ENDIF(UNIX) diff -Nru cgreen-1.3.0/doc/man/man1/cgreen-debug.1 cgreen-1.6.3/doc/man/man1/cgreen-debug.1 --- cgreen-1.3.0/doc/man/man1/cgreen-debug.1 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/man/man1/cgreen-debug.1 2023-10-03 15:16:52.000000000 
+0000 @@ -1,7 +1,7 @@ -.TH CGREEN-DEBUG "1" "May 2020" "cgreen 1.2.0" "User Commands" +.TH CGREEN-DEBUG 1 .SH NAME -cgreen-debug \- start cgreen-runner under GDB and break at a specific test +cgreen-debug \- start cgreen-runner under a debugger and break at a specific test .SH SYNOPSIS .B cgreen\-debug @@ -11,9 +11,9 @@ .SH DESCRIPTION .B cgreen-debug -is a script to start cgreen-runner under gdb, load a \fILIBRARY\fR and break on -a named \fITEST\fR. Where \fILIBRARY\fR is a filename of the shared library of -Cgreen tests, usually .so or .dll depending on your platform. \fITEST\fR is the +is a script to start cgreen-runner under a debugger (primarily gdb), load a \fILIBRARY\fR +and break on a named \fITEST\fR. Where \fILIBRARY\fR is a filename of the shared library of +Cgreen tests, usually .so, .dll or .dylib, depending on your platform. \fITEST\fR is the name of a test in that library in the format :. .SS OPTIONS @@ -21,6 +21,12 @@ .B "\-h, \-\-help" Print some usage information and exit. +.TP +.B "\-d, \-\-debugger" debugger +Instead of default (gdb) use +.I debugger +as the debugger. Allowed values are "gdb", "cgdb", "lldb". + .SH "SEE ALSO" cgreen(5) cgreen-runner(1) @@ -33,6 +39,6 @@ is in the .B Cgreen manual available at -.UR https://\:cgreen-devs.github.io/ +.UR https://cgreen-devs.github.io/ GitHub .UE . diff -Nru cgreen-1.3.0/doc/man/man1/cgreen-runner.1 cgreen-1.6.3/doc/man/man1/cgreen-runner.1 --- cgreen-1.3.0/doc/man/man1/cgreen-runner.1 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/man/man1/cgreen-runner.1 2023-10-03 15:16:52.000000000 +0000 @@ -1,4 +1,3 @@ -.mso www.tmac .TH CGREEN-RUNNER 1 @@ -84,5 +83,7 @@ is in the .B Cgreen manual available at -.URL https://github.com/cgreen-devs/cgreen GitHub . +.UR https://github.com/cgreen-devs/cgreen +GitHub +.UE . 
diff -Nru cgreen-1.3.0/doc/README.asciidoc cgreen-1.6.3/doc/README.asciidoc --- cgreen-1.3.0/doc/README.asciidoc 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/doc/README.asciidoc 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,7 @@ +// Converted to index.html by asciidoctor-ghpages GitHub Action += Cgreen documentation + +- link:cgreen-guide-en.html[The Guide] +- link:cheat-sheet.html[Cheat Sheet] + + diff -Nru cgreen-1.3.0/doc/tutorial_src/custom_constraint1.c cgreen-1.6.3/doc/tutorial_src/custom_constraint1.c --- cgreen-1.3.0/doc/tutorial_src/custom_constraint1.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/custom_constraint1.c 2023-10-03 15:16:52.000000000 +0000 @@ -11,7 +11,7 @@ } Constraint static_is_bigger_than_5 = { - /* .type */ VALUE_COMPARER, + /* .type */ CGREEN_VALUE_COMPARER_CONSTRAINT, /* .name */ "be bigger than 5", /* .destroy */ destroy_static_constraint, /* .compare */ compare_want_greater_than_5, @@ -19,7 +19,7 @@ /* .format_failure_message_for */ failure_message_for, /* .actual_value_message */ "", /* .expected_value_message */ "", - /* .expected_value */ {INTEGER, {5}}, + /* .expected_value */ {CGREEN_INTEGER, {5}}, /* .stored_value_name */ "null", /* .parameter_name */ NULL, /* .size_of_stored_value */ 0 diff -Nru cgreen-1.3.0/doc/tutorial_src/custom_constraint2.c cgreen-1.6.3/doc/tutorial_src/custom_constraint2.c --- cgreen-1.3.0/doc/tutorial_src/custom_constraint2.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/custom_constraint2.c 2023-10-03 15:16:52.000000000 +0000 @@ -16,7 +16,7 @@ constraint->expected_value = make_cgreen_integer_value(expected_value); constraint->expected_value_name = string_dup(expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_smaller_value; constraint->execute = &test_want; diff -Nru cgreen-1.3.0/doc/tutorial_src/custom_constraint3.c 
cgreen-1.6.3/doc/tutorial_src/custom_constraint3.c --- cgreen-1.3.0/doc/tutorial_src/custom_constraint3.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/custom_constraint3.c 2023-10-03 15:16:52.000000000 +0000 @@ -40,7 +40,7 @@ constraint->expected_value = make_cgreen_pointer_value((void*)expected_value); constraint->expected_value_name = string_dup(expected_value_name); - constraint->type = CONTENT_COMPARER; + constraint->type = CGREEN_CONTENT_COMPARER_CONSTRAINT; constraint->compare = &compare_piece_and_box_size; constraint->execute = &test_fit_piece; diff -Nru cgreen-1.3.0/doc/tutorial_src/first0.out cgreen-1.6.3/doc/tutorial_src/first0.out --- cgreen-1.3.0/doc/tutorial_src/first0.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/first0.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,2 +1,2 @@ Running "main" (0 tests)... -Completed "main": No asserts. +Completed "main": No assertions. diff -Nru cgreen-1.3.0/doc/tutorial_src/Makefile cgreen-1.6.3/doc/tutorial_src/Makefile --- cgreen-1.3.0/doc/tutorial_src/Makefile 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/Makefile 2023-10-03 15:16:52.000000000 +0000 @@ -5,7 +5,7 @@ # makes generation of PDF impossible. 'stream2' and 'stream5' might # generate random data as 'actual' values in the output file. That is # the way it is supposed to be to work as documentation examples. -# Take care that data is printable but looks random. +# Take care data for those examples is printable but looks random. CFLAGS = -Wall -g @@ -13,15 +13,13 @@ .SILENT : CGREEN_ROOT=../.. -FPIC = -fPIC # required on Linux, Darwin, ... 
-# Local tweaks, you can also override "MYSQLROOT={somedir}" on the command line +# Point to MySQL includes and linking for the MySQL examples ifeq ($(shell uname),Darwin) MYSQL_INCLUDE = /opt/local/include/mysql56/mysql MYSQL_LIB = /opt/local/lib/mysql56/mysql else ifeq ($(shell uname -o),Cygwin) -FPIC= MYSQL_INCLUDE = /usr/include/mysql MYSQL_LIB = /usr/lib else @@ -36,23 +34,29 @@ LINK = gcc -g LIBDIRS = -L$(CGREEN_BIN)/src -L$(MYSQL_LIB) -# You might need to fixup the various path issues to get dynamic loading to work. -# You could try the script ../setup.sh to ensure that the bins & libs in this -# development tree is used - -# MacOSX: +# Enviroment dependent fixup of the various paths to use bins & libs in this development tree +FPIC = -fPIC # required on Linux, Darwin, ... +ifeq ($(shell uname),Darwin) LDPATH = DYLD_LIBRARY_PATH="$(CGREEN_BIN)/src:$(MYSQL_LIB)" -# Cygwin? -#LDPATH = PATH="$(CGREEN_BIN)/src:$(MYSQL_LIB)" +else +ifeq ($(shell uname -o),Cygwin) +FPIC= +LDPATH = PATH="$(CGREEN_BIN)/src:$(MYSQL_LIB)" +else +LDPATH = LD_LIBRARY_PATH="$(CGREEN_BIN)/src:$(MYSQL_LIB)" +endif +endif RUNNER = $(LDPATH) $(CGREEN_BIN)/tools/cgreen-runner + all: with_main with_runner special echo echo "NOTE: 'stream2.out' and 'stream5.out' should contain a simulation" echo "NOTE: of random memory as the actual value. Don't commit changes that do" echo "NOTE: not. But also note that PDF generation of the documentation" echo "NOTE: will fail if there are non-UTF-8 characters in them." + echo "NOTE: Edit by hand to reasonable values if needed." 
# Since we have multiple files for the same tutorial example, distinguished by a number, # we need to remove that number from all output @@ -65,28 +69,29 @@ SED=sed $(REMOVE_NUMBERS) $(NORMALIZE_DURATION) with_main: first0 first1 \ - words0 words1 words2 words3 words4 words5 words6 words7 words8 words9 \ - strlen1 strlen2 strlen3 strlen4 strlen5 strlen6 strlen7 \ - double1 \ - schema1 schema2 \ - crash1 crash2 crash3 \ - suite1 \ - test_as_xml0 \ - test_as_xml1 \ - test_as_xml2 \ - test_as_xml3 \ - test_as_xml4 \ - custom_constraint1 \ - custom_constraint2 \ - custom_constraint3 + words0 words1 words2 words3 words4 words5 words6 words7 words8 words9 \ + strlen1 strlen2 strlen3 strlen4 strlen5 strlen6 strlen7 \ + double1 \ + schema1 schema2 \ + crash1 crash2 crash3 \ + suite1 \ + test_as_xml0 \ + test_as_xml1 \ + test_as_xml2 \ + test_as_xml3 \ + test_as_xml4 \ + custom_constraint1 \ + custom_constraint2 \ + custom_constraint3 for f in $^ ; do echo $$f; $(LDPATH) ./$$f | $(SED) > $$f.out; done # Most tests built to run with the runner we can run here, but some need special care # They have their output produced directly in their build rules below and are run using # the 'special' rule -with_runner: side_effect stream0 stream1 stream2 stream3 stream4 stream6 \ - multiple_streams1 multiple_streams2 \ - formatter0 formatter1 formatter2 formatter3 formatter4 formatter5 formatter6 +with_runner: set_content side_effect stream0 stream1 stream2 stream3 stream4 stream6 \ + multiple_streams1 multiple_streams2 \ + formatter0 formatter1 formatter2 formatter3 formatter4 formatter5 formatter6 \ + struct_parameters for f in $^ ; do echo $$f; $(RUNNER) $$f.so | $(SED) > $$f.out; done special: runner1 runner2 runner3 stream5 learning_mocks @@ -210,6 +215,10 @@ # All these examples run using the runner which automatically outputs the file name # Since that includes the example #, which we don't want in the tutorial, we use # sed to remove that number from the file name in the output 
+.PHONY: set_content +set_content: stream_tests0.o read_paragraph1.o + $(LINK) -shared -o $@.so $^ $(LIBDIRS) -lcgreen + .PHONY: side_effect side_effect: stream_tests0.o read_paragraph1.o $(LINK) -shared -o $@.so $^ $(LIBDIRS) -lcgreen @@ -294,6 +303,9 @@ formatter6: stream2.o formatter_tests5.o $(LINK) -shared -o $@.so $^ $(LIBDIRS) -lcgreen +struct_parameters: struct_parameters.o + $(LINK) -shared -o $@.so $^ $(LIBDIRS) -lcgreen + learning_mocks: learning_mocks.o $(LINK) -shared -o $@.so $^ $(LIBDIRS) -lcgreen $(RUNNER) $@.so diff -Nru cgreen-1.3.0/doc/tutorial_src/schema1.out cgreen-1.6.3/doc/tutorial_src/schema1.out --- cgreen-1.3.0/doc/tutorial_src/schema1.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/schema1.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,11 +0,0 @@ -Running "person_tests" (2 tests)... -schema_tests.c:30: Failure: can_add_person_to_database - Expected [get_person_name(found)] to [equal string] ["Fred"] - actual value: ["(null)"] - expected to equal: ["Fred"] - -schema_tests.c:41: Failure: cannot_add_duplicate_person - Expected [save_person(duplicate)] to [be false] - - "person_tests": 1 pass, 2 failures in 42ms. -Completed "person_tests": 1 pass, 2 failures in 42ms. diff -Nru cgreen-1.3.0/doc/tutorial_src/schema2.out cgreen-1.6.3/doc/tutorial_src/schema2.out --- cgreen-1.3.0/doc/tutorial_src/schema2.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/schema2.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,11 +0,0 @@ -Running "person_tests" (2 tests)... -schema_tests.c:29: Failure: can_add_person_to_database - Expected [get_person_name(found)] to [equal string] ["Fred"] - actual value: ["(null)"] - expected to equal: ["Fred"] - -schema_tests.c:38: Failure: cannot_add_duplicate_person - Expected [save_person(duplicate)] to [be false] - - "person_tests": 1 pass, 2 failures in 42ms. -Completed "person_tests": 1 pass, 2 failures in 42ms. 
diff -Nru cgreen-1.3.0/doc/tutorial_src/set_contents.c cgreen-1.6.3/doc/tutorial_src/set_contents.c --- cgreen-1.3.0/doc/tutorial_src/set_contents.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/set_contents.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,14 @@ +#include +#include + +void convert_to_uppercase(char *converted_string, const char *original_string) { + mock(converted_string, original_string); +} + +Ensure(setting_content_of_out_parameter) { + expect(convert_to_uppercase, + when(original_string, is_equal_to_string("upper case")), + will_set_contents_of_parameter(converted_string, + "UPPER CASE", 11)); + +} diff -Nru cgreen-1.3.0/doc/tutorial_src/set_field.c cgreen-1.6.3/doc/tutorial_src/set_field.c --- cgreen-1.3.0/doc/tutorial_src/set_field.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/set_field.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,18 @@ +#include +#include + +struct structure { + int field; + char *string; +}; + +void update_field(struct structure *struct_to_update) { + int *field = &struct_to_update->field; + mock(struct_to_update, field); +} + +Ensure(setting_field_of_parameter) { + int fourty_two = 42; + expect(update_field, + will_set_contents_of_parameter(field, &fourty_two, sizeof(int))); +} diff -Nru cgreen-1.3.0/doc/tutorial_src/side_effect.c cgreen-1.6.3/doc/tutorial_src/side_effect.c --- cgreen-1.3.0/doc/tutorial_src/side_effect.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/side_effect.c 2023-10-03 15:16:52.000000000 +0000 @@ -37,7 +37,7 @@ will_return(EOF), with_side_effect(&update_counter, &number_of_times_reader_was_called)); - expect_never(writer); + never_expect(writer); by_paragraph(&reader, NULL, &writer, NULL); assert_that(number_of_times_reader_was_called, is_equal_to(1)); diff -Nru cgreen-1.3.0/doc/tutorial_src/stream2.out cgreen-1.6.3/doc/tutorial_src/stream2.out --- cgreen-1.3.0/doc/tutorial_src/stream2.out 2020-06-04 18:00:10.000000000 +0000 
+++ cgreen-1.6.3/doc/tutorial_src/stream2.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,7 +1,7 @@ Running "stream" (2 tests)... stream_tests.c:23: Failure: ParagraphReader -> gives_one_character_line_for_one_character_stream Expected [line] to [equal string] ["a"] - actual value: ["¡0@ð"] + actual value: [""] expected to equal: ["a"] "ParagraphReader": 1 pass, 1 failure in 42ms. diff -Nru cgreen-1.3.0/doc/tutorial_src/struct_parameters.c cgreen-1.6.3/doc/tutorial_src/struct_parameters.c --- cgreen-1.3.0/doc/tutorial_src/struct_parameters.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/struct_parameters.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,55 @@ +#include +#include +#include + +Describe(StructParameters); +BeforeEach(StructParameters) {} +AfterEach(StructParameters) {} + +/* + This is uncompilable code that is inserted as a non-example + +typedef struct { + int i; + char *string; +} Struct; + +int function_taking_struct(Struct s) { + return (int)mock(?); +} +*/ + +typedef struct { + int i; + char *string; +} Struct; + +int function_checking_a_field(Struct s) { + return (int)mock(s.i); +} + + +Ensure(StructParameters, can_mock_field_in_parameter) { + Struct struct_to_send = { .i = 12, .string = "hello" }; + + expect(function_checking_a_field, when(s.i, is_equal_to(12)), + will_return(12)); + + assert_that(function_checking_a_field(struct_to_send), is_equal_to(12)); + +} + +int function_checking_multiple_fields(Struct s) { + return (int)mock(s.i, s.string); +} + +Ensure(StructParameters, can_mock_muultiple_fields_in_parameter) { + Struct struct_to_send = { .i = 13, .string = "hello world!" 
}; + + expect(function_checking_multiple_fields, + when(s.i, is_equal_to(13)), + when(s.string, begins_with_string("hello")), + will_return(13)); + + assert_that(function_checking_multiple_fields(struct_to_send), is_equal_to(13)); +} diff -Nru cgreen-1.3.0/doc/tutorial_src/suite1.out cgreen-1.6.3/doc/tutorial_src/suite1.out --- cgreen-1.3.0/doc/tutorial_src/suite1.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/suite1.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,17 +0,0 @@ -Running "main" (4 tests)... -suite_strlen_tests.c:9: Failure: our_tests -> returns_five_for_hello - Expected [strlen("Hiya")] to [equal] [5] - actual value: [4] - expected value: [5] - - "our_tests": 1 pass, 1 failure in 42ms. -suite_person_tests.c:29: Failure: person_tests -> can_add_person_to_database - Expected [get_person_name(found)] to [equal string] ["Fred"] - actual value: ["(null)"] - expected to equal: ["Fred"] - -suite_person_tests.c:38: Failure: person_tests -> cannot_add_duplicate_person - Expected [save_person(duplicate)] to [be false] - - "person_tests": 1 pass, 2 failures in 42ms. -Completed "main": 2 passes, 3 failures in 42ms. diff -Nru cgreen-1.3.0/doc/tutorial_src/words0.out cgreen-1.6.3/doc/tutorial_src/words0.out --- cgreen-1.3.0/doc/tutorial_src/words0.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/words0.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,3 +1,3 @@ Running "main" (0 tests)... - "words_tests": No asserts. -Completed "main": No asserts. + "words_tests": No assertions. +Completed "main": No assertions. diff -Nru cgreen-1.3.0/doc/tutorial_src/words3.out cgreen-1.6.3/doc/tutorial_src/words3.out --- cgreen-1.3.0/doc/tutorial_src/words3.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/words3.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,7 +1,7 @@ Running "main" (2 tests)... 
words_tests.c:21: Failure: words_tests -> converts_spaces_to_zeroes Expected [comparison] to [equal] [0] - actual value: [-8192] + actual value: [-32] expected value: [0] "words_tests": 1 pass, 1 failure in 42ms. diff -Nru cgreen-1.3.0/doc/tutorial_src/words6.out cgreen-1.6.3/doc/tutorial_src/words6.out --- cgreen-1.3.0/doc/tutorial_src/words6.out 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/doc/tutorial_src/words6.out 2023-10-03 15:16:52.000000000 +0000 @@ -1,5 +1,5 @@ Running "main" (3 tests)... -words_tests.c:33: Failure: words_tests -> invokes_callback_once_for_single_word_sentence +words_tests.c:32: Failure: words_tests -> invokes_callback_once_for_single_word_sentence Expected call was not made to mocked function [mocked_callback] "words_tests": 2 passes, 1 failure in 42ms. diff -Nru cgreen-1.3.0/fatify cgreen-1.6.3/fatify --- cgreen-1.3.0/fatify 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/fatify 2023-10-03 15:16:52.000000000 +0000 @@ -2,12 +2,18 @@ # # This script is a part of Cgreen and creates fat versions of # cgreen-runner and libcgreen.dylib which is necessary if you are -# using e.g. MacPorts gcc which don't have suppport for fat binaries -# built in. +# using e.g. Homebrew/MacPorts gcc which don't have suppport for fat +# binaries built in. # -# It ensures that both architectures are built, saving them under -# descriptive names and then uses lipo to merge them into fat -# binaries. +# NOTE: This does not work completely automatic for x86_64/M1 since +# Homebrew GCC can only compile for the current architecture. +# +# Since automatically ensuring that both architectures are built is +# currently impossible from a single environment, you have to run this +# script once on each architecture. The script will save the built +# binaries under descriptive names and if it discovers that binaries +# for the other architecture already exists it uses lipo to merge them +# into fat binaries. 
# # At this point there is no way to run this as part of the CMake build # and install, and since the install target of CMake always rebuilds @@ -16,34 +22,40 @@ # /thoni56 # function buildfor() { - cmake -DCMAKE_OSX_ARCHITECTURES=$1 .. + rm -rf build make - cp tools/cgreen-runner tools/cgreen-runner.$1 - cp src/libcgreen.dylib src/libcgreen.$1.dylib + cp build/tools/cgreen-runner cgreen-runner.$1 + cp build/src/libcgreen.dylib libcgreen.$1.dylib } - -# Ensure there is at least a build directory -if [ ! -d "build" ]; then - mkdir build +arch=`arch` +if [ $arch == "arm64" ] ; then + buildfor arm64 + if [ ! -f libcgreen.x86_64.dylib ] ; then + echo "You need to run this aldo in an x86_64 environment to create fat binaries." + exit + fi +else + buildfor x86_64 + if [ ! -f libcgreen.arm64.dylib ] ; then + echo "You need to run this in an arm64 environment to create fat binaries." + exit + fi fi -cd build - -buildfor i386 -buildfor x86_64 -lipo -create -o tools/cgreen-runner.fat -arch i386 tools/cgreen-runner.i386 -arch x86_64 tools/cgreen-runner.x86_64 -lipo -create -o src/libcgreen.dylib.fat -arch i386 src/libcgreen.i386.dylib -arch x86_64 src/libcgreen.x86_64.dylib +lipo -create -o cgreen-runner -arch arm64 cgreen-runner.arm64 -arch x86_64 cgreen-runner.x86_64 +lipo -create -o libcgreen.dylib -arch arm64 libcgreen.arm64.dylib -arch x86_64 libcgreen.x86_64.dylib -if [ -z "$1" ]; then - echo "Fat binaries build for build/tools/cgreen-runner and build/src/libcgreen.dylib" - echo "Install with:" - echo " sudo cp build/tools/cgreen-runner.fat /bin/cgreen-runner" - echo " sudo cp src/libcgreen.dylib.fat /lib/libcgreen.dylib" - echo - echo "If is the same for bin and lib, such as '/usr/local'," - echo "you can give that as an argument and the script will do that for you." 
+if [ -z "$1" ] ; then + echo "Fat binaries of cgreen-runner and libcgreen.dylib built:" + file cgreen-runner libcgreen.dylib + echo "Install with:" + echo " sudo cp cgreen-runner /bin/cgreen-runner" + echo " sudo cp libcgreen.dylib /lib/libcgreen.dylib" + echo + echo "If is the same for bin and lib, such as '/usr/local'," + echo "you can give that as an argument and the script will do that for you." else - echo "Installing 'cgreen-runner' in $1/bin and 'libcgreen.dylib' in $1/lib..." - sudo cp tools/cgreen-runner.fat $1/bin/cgreen-runner - sudo cp src/libcgreen.dylib.fat $1/lib/libcgreen.dylib + echo "Installing fat 'cgreen-runner' in $1/bin and 'libcgreen.dylib' in $1/lib..." + sudo cp cgreen-runner $1/bin/cgreen-runner + sudo cp libcgreen.dylib $1/lib/libcgreen.dylib fi diff -Nru cgreen-1.3.0/.github/workflows/asciidoctor-docs.yml cgreen-1.6.3/.github/workflows/asciidoctor-docs.yml --- cgreen-1.3.0/.github/workflows/asciidoctor-docs.yml 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/.github/workflows/asciidoctor-docs.yml 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,26 @@ +name: asciidoctor-docs + +on: + push: + branches: + - master + +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v3 + + # Includes the AsciiDoctor GitHub Pages Action to convert adoc files to html and publish to gh-pages branch + - name: asciidoctor-ghpages + uses: manoelcampos/asciidoctor-ghpages-action@v2.2.4 + with: + asciidoctor_params: -r asciidoctor-diagram -a VERSION=1.6.0 + source_dir: doc + post_build: git add -f *.png + adoc_file_ext: .asciidoc diff -Nru cgreen-1.3.0/.gitignore cgreen-1.6.3/.gitignore --- cgreen-1.3.0/.gitignore 2020-06-04 18:00:10.000000000 +0000 +++ 
cgreen-1.6.3/.gitignore 2023-10-03 15:16:52.000000000 +0000 @@ -5,6 +5,7 @@ Testing *.exe *.stackdump +*.so build-stamp configure-stamp *~ @@ -14,3 +15,4 @@ lib valgrind.log gitrevision.h +.cgreen-debug-commands \ No newline at end of file diff -Nru cgreen-1.3.0/gitrevision.h.in cgreen-1.6.3/gitrevision.h.in --- cgreen-1.3.0/gitrevision.h.in 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/gitrevision.h.in 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,3 @@ +/* This file was autogenerated based from cmake */ +#cmakedefine GITREVISION "@GITREVISION@" + diff -Nru cgreen-1.3.0/include/cgreen/assertions.h cgreen-1.6.3/include/cgreen/assertions.h --- cgreen-1.3.0/include/cgreen/assertions.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/assertions.h 2023-10-03 15:16:52.000000000 +0000 @@ -5,6 +5,7 @@ #include "internal/stringify_token.h" #include +#include #include #include @@ -27,7 +28,7 @@ */ #define assert_that(...) assert_that_NARG(__VA_ARGS__)(__VA_ARGS__) -#define assert_that_double(actual, constraint) assert_that_double_(__FILE__, __LINE__, STRINGIFY_TOKEN(actual), (double)(actual), (constraint)) +#define assert_that_double(actual, constraint) assert_that_double_(FILENAME, __LINE__, STRINGIFY_TOKEN(actual), (double)(actual), (constraint)) #define pass_test() assert_true(true) #define fail_test(...) 
assert_true_with_message(false, __VA_ARGS__) diff -Nru cgreen-1.3.0/include/cgreen/cgreen.h cgreen-1.6.3/include/cgreen/cgreen.h --- cgreen-1.3.0/include/cgreen/cgreen.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/cgreen.h 2023-10-03 15:16:52.000000000 +0000 @@ -9,5 +9,10 @@ #include #include +#define CGREEN_VERSION "1.6.3" +#define CGREEN_VERSION_MAJOR 1 +#define CGREEN_VERSION_MINOR 6 +#define CGREEN_VERSION_PATCH 3 + extern char *cgreen_library_version; extern char *cgreen_library_revision; diff -Nru cgreen-1.3.0/include/cgreen/CMakeLists.txt cgreen-1.6.3/include/cgreen/CMakeLists.txt --- cgreen-1.3.0/include/cgreen/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -10,6 +10,7 @@ cpp_assertions.h cpp_constraint.h cute_reporter.h + filename.h legacy.h mocks.h string_comparison.h @@ -19,6 +20,7 @@ text_reporter.h unit.h vector.h + xml_reporter.h ) install( diff -Nru cgreen-1.3.0/include/cgreen/constraint.h cgreen-1.6.3/include/cgreen/constraint.h --- cgreen-1.3.0/include/cgreen/constraint.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/constraint.h 2023-10-03 15:16:52.000000000 +0000 @@ -10,15 +10,17 @@ #include typedef enum { - VALUE_COMPARER, - CONTENT_COMPARER, - STRING_COMPARER, - DOUBLE_COMPARER, - RETURN_VALUE, - CONTENT_SETTER, - RETURN_POINTER, - CALL, - CALL_COUNTER + CGREEN_VALUE_COMPARER_CONSTRAINT, + CGREEN_CONTENT_COMPARER_CONSTRAINT, + CGREEN_STRING_COMPARER_CONSTRAINT, + CGREEN_DOUBLE_COMPARER_CONSTRAINT, + CGREEN_RETURN_VALUE_CONSTRAINT, + CGREEN_CONTENT_SETTER_CONSTRAINT, + CGREEN_RETURN_POINTER_CONSTRAINT, + CGREEN_CALL_CONSTRAINT, + CGREEN_CALL_COUNTER_CONSTRAINT, + CGREEN_RETURN_BY_VALUE_CONSTRAINT, + CGREEN_CAPTURE_PARAMETER_CONSTRAINT } ConstraintType; typedef struct Constraint_ Constraint; @@ -60,6 +62,10 @@ Constraint *create_equal_to_value_constraint(intptr_t expected_value, const char *expected_value_name); 
Constraint *create_equal_to_hexvalue_constraint(intptr_t expected_value, const char *expected_value_name); Constraint *create_not_equal_to_value_constraint(intptr_t expected_value, const char *expected_value_name); +Constraint *create_not_null_constraint(void); +Constraint *create_is_null_constraint(void); +Constraint *create_is_false_constraint(void); +Constraint *create_is_true_constraint(void); Constraint *create_greater_than_value_constraint(intptr_t expected_value, const char *expected_value_name); Constraint *create_less_than_value_constraint(intptr_t expected_value, const char *expected_value_name); Constraint *create_equal_to_contents_constraint(void *pointer_to_compare, size_t size_to_compare, const char *compared_pointer_name); @@ -82,6 +88,7 @@ Constraint *create_return_double_value_constraint(double value_to_return); Constraint *create_set_parameter_value_constraint(const char *parameter_name, intptr_t value_to_set, size_t size_to_set); Constraint *create_with_side_effect_constraint(void (*callback)(void *), void *data); +Constraint *create_capture_parameter_constraint(const char *parameter_name, void *captured, size_t size_to_capture); #ifdef __cplusplus } diff -Nru cgreen-1.3.0/include/cgreen/constraint_syntax_helpers.h cgreen-1.6.3/include/cgreen/constraint_syntax_helpers.h --- cgreen-1.3.0/include/cgreen/constraint_syntax_helpers.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/constraint_syntax_helpers.h 2023-10-03 15:16:52.000000000 +0000 @@ -18,6 +18,11 @@ #define is_equal_to(value) create_equal_to_value_constraint((intptr_t)value, #value) #define is_equal_to_hex(value) create_equal_to_hexvalue_constraint((intptr_t)value, #value) #define is_not_equal_to(value) create_not_equal_to_value_constraint((intptr_t)value, #value) +#define is_non_null is_not_null +#define is_not_null create_not_null_constraint() +#define is_null create_is_null_constraint() +#define is_false create_is_false_constraint() +#define is_true 
create_is_true_constraint() #define is_greater_than(value) create_greater_than_value_constraint((intptr_t)value, #value) #define is_less_than(value) create_less_than_value_constraint((intptr_t)value, #value) @@ -46,32 +51,6 @@ #define will_return_by_value(value, size) create_return_by_value_constraint((intptr_t)&value, size) #define will_return_double(value) create_return_double_value_constraint(value) #define will_set_contents_of_parameter(parameter_name, pointer_to_value, size) create_set_parameter_value_constraint(#parameter_name, (intptr_t)pointer_to_value, (size_t)size) - - -#ifdef __cplusplus -extern "C" { -#endif - -/* these constraints don't take arguments, and we don't want to force - * users to put "()" on the end of every usage. we also want to avoid - * macros when practical, for the namespacing and confusing symbol - * collision issues, so we use singleton instances. - */ -extern Constraint static_non_null_constraint; -extern Constraint *is_non_null; -#define is_not_null (is_non_null) - -extern Constraint static_null_constraint; -extern Constraint *is_null; - -extern Constraint static_false_constraint; -extern Constraint *is_false; - -extern Constraint static_true_constraint; -extern Constraint *is_true; - -#ifdef __cplusplus -} -#endif +#define will_capture_parameter(parameter_name, local_variable) create_capture_parameter_constraint(#parameter_name, &local_variable, sizeof(local_variable)) #endif diff -Nru cgreen-1.3.0/include/cgreen/cpp_constraint.h cgreen-1.6.3/include/cgreen/cpp_constraint.h --- cgreen-1.3.0/include/cgreen/cpp_constraint.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/cpp_constraint.h 2023-10-03 15:16:52.000000000 +0000 @@ -52,10 +52,10 @@ CppConstraint *create_equal_to_value_constraint(CgreenValue cgreen_value, T expected_value, const char *expected_value_name) { CppConstraint *constraint;// = create_cpp_constraint(); constraint = new CppConstraint(); - constraint->type = VALUE_COMPARER; + constraint->type = 
CGREEN_VALUE_COMPARER_CONSTRAINT; (void)cgreen_value; /* Avoid warnings for UNUSED, which it is for now */ - + constraint->Constraint::compare = &compare_want_value; constraint->execute = &test_want; constraint->name = "equal"; diff -Nru cgreen-1.3.0/include/cgreen/filename.h cgreen-1.6.3/include/cgreen/filename.h --- cgreen-1.3.0/include/cgreen/filename.h 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/filename.h 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,19 @@ +#ifndef FILENAME_HEADER +#define FILENAME_HEADER + +#ifdef __cplusplus +namespace cgreen { + extern "C" { +#endif + +#ifndef FILENAME +#define FILENAME __FILE__ +#endif + +#ifdef __cplusplus + } +} +#endif + + +#endif diff -Nru cgreen-1.3.0/include/cgreen/internal/android_headers/androidcompat.h cgreen-1.6.3/include/cgreen/internal/android_headers/androidcompat.h --- cgreen-1.3.0/include/cgreen/internal/android_headers/androidcompat.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/android_headers/androidcompat.h 2023-10-03 15:16:52.000000000 +0000 @@ -27,6 +27,4 @@ #define printf(fmt, ...) LOGE(fmt, ##__VA_ARGS__) #define fprintf(stderr, ...) LOGE(__VA_ARGS__) #define vprintf(fmt, ...) LOGE(fmt, ##__VA_ARGS__) -#define snprintf(buf, len, fmt, ...) LOGE(fmt, ##__VA_ARGS__) -#define sprintf(buf, fmt, ...) LOGE(fmt, ##__VA_ARGS__) #endif // #ifdef __ANDROID__ diff -Nru cgreen-1.3.0/include/cgreen/internal/assertions_internal.h cgreen-1.6.3/include/cgreen/internal/assertions_internal.h --- cgreen-1.3.0/include/cgreen/internal/assertions_internal.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/assertions_internal.h 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,7 @@ #ifndef ASSERTIONS_INTERNAL_HEADER #define ASSERTIONS_INTERNAL_HEADER +#include #include #ifdef __cplusplus @@ -41,7 +42,7 @@ #define assert_that_NARG(...) 
ASSERT_THAT_macro_dispatcher(assert_that, __VA_ARGS__) #define assert_that_expression(expression) \ - assert_core_(__FILE__, __LINE__, STRINGIFY_TOKEN(expression), (expression), is_true); + assert_core_(FILENAME, __LINE__, STRINGIFY_TOKEN(expression), (expression), is_true); void assert_equal_(const char *file, int line, const char *expression, intptr_t tried, intptr_t expected); void assert_not_equal_(const char *file, int line, const char *expression, intptr_t tried, intptr_t expected); diff -Nru cgreen-1.3.0/include/cgreen/internal/c_assertions.h cgreen-1.6.3/include/cgreen/internal/c_assertions.h --- cgreen-1.3.0/include/cgreen/internal/c_assertions.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/c_assertions.h 2023-10-03 15:16:52.000000000 +0000 @@ -2,12 +2,13 @@ #define C_ASSERTIONS_HEADER #include +#include #include #include "stringify_token.h" #ifndef __cplusplus -#define assert_that_constraint(actual, constraint) assert_core_(__FILE__, __LINE__, STRINGIFY_TOKEN(actual), (intptr_t)(actual), (constraint)) +#define assert_that_constraint(actual, constraint) assert_core_(FILENAME, __LINE__, STRINGIFY_TOKEN(actual), (intptr_t)(actual), (constraint)) #endif #ifdef __cplusplus diff -Nru cgreen-1.3.0/include/cgreen/internal/cgreen_time.h cgreen-1.6.3/include/cgreen/internal/cgreen_time.h --- cgreen-1.3.0/include/cgreen/internal/cgreen_time.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/cgreen_time.h 2023-10-03 15:16:52.000000000 +0000 @@ -2,7 +2,6 @@ #define CGREEN_TIME_HEADER #include -#include #ifdef __cplusplus namespace cgreen { diff -Nru cgreen-1.3.0/include/cgreen/internal/cpp_assertions.h cgreen-1.6.3/include/cgreen/internal/cpp_assertions.h --- cgreen-1.3.0/include/cgreen/internal/cpp_assertions.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/cpp_assertions.h 2023-10-03 15:16:52.000000000 +0000 @@ -2,6 +2,7 @@ #define INTERNAL_CPP_ASSERTIONS_HEADER #include 
+#include #include #include #include @@ -10,7 +11,7 @@ namespace cgreen { -#define assert_that_constraint(actual, constraint) assert_that_(__FILE__, __LINE__, STRINGIFY_TOKEN(actual), actual, constraint) +#define assert_that_constraint(actual, constraint) assert_that_(FILENAME, __LINE__, STRINGIFY_TOKEN(actual), actual, constraint) void assert_that_(const char *file, int line, const char *actual_string, const std::string& actual, Constraint *constraint); void assert_that_(const char *file, int line, const char *actual_string, const std::string *actual, Constraint *constraint); diff -Nru cgreen-1.3.0/include/cgreen/internal/unit_implementation.h cgreen-1.6.3/include/cgreen/internal/unit_implementation.h --- cgreen-1.3.0/include/cgreen/internal/unit_implementation.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/internal/unit_implementation.h 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,8 @@ #ifndef UNIT_IMPLEMENTATION_HEADER #define UNIT_IMPLEMENTATION_HEADER +#include + #ifdef __cplusplus #include #endif @@ -45,20 +47,20 @@ #define EnsureWithContextAndSpecificationName(skip, contextName, specName) \ static void contextName##__##specName (void);\ - CgreenTest spec_name(contextName, specName) = { skip, &contextFor##contextName, STRINGIFY_TOKEN(specName), &contextName##__##specName, __FILE__, __LINE__ }; \ + CgreenTest spec_name(contextName, specName) = { skip, &contextFor##contextName, STRINGIFY_TOKEN(specName), &contextName##__##specName, FILENAME, __LINE__ }; \ static void contextName##__##specName (void) extern CgreenContext defaultContext; #define EnsureWithSpecificationName(skip, specName) \ static void specName (void);\ - CgreenTest spec_name(default, specName) = { skip, &defaultContext, STRINGIFY_TOKEN(specName), &specName, __FILE__, __LINE__ }; \ + CgreenTest spec_name(default, specName) = { skip, &defaultContext, STRINGIFY_TOKEN(specName), &specName, FILENAME, __LINE__ }; \ static void specName (void) #define 
DescribeImplementation(subject) \ static void setup(void); \ static void teardown(void); \ - static CgreenContext contextFor##subject = { STRINGIFY_TOKEN(subject), __FILE__, &setup, &teardown }; \ + static CgreenContext contextFor##subject = { STRINGIFY_TOKEN(subject), FILENAME, &setup, &teardown }; \ extern void(*BeforeEach_For_##subject)(void); \ extern void(*AfterEach_For_##subject)(void); \ static void setup(void) { \ diff -Nru cgreen-1.3.0/include/cgreen/legacy.h cgreen-1.6.3/include/cgreen/legacy.h --- cgreen-1.3.0/include/cgreen/legacy.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/legacy.h 2023-10-03 15:16:52.000000000 +0000 @@ -1,42 +1,80 @@ #ifndef LEGACY_HEADER #define LEGACY_HEADER +#include + #include "internal/stringify_token.h" +#ifdef __cplusplus +/* Legacy style asserts (for C++):*/ +#define assert_true(result) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, (result), "[" STRINGIFY_TOKEN(result) "] should be true\n", NULL) +#define assert_false(result) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, ! 
(result), "[" STRINGIFY_TOKEN(result) "] should be false\n", NULL) +#define assert_equal(tried, expected) \ + assert_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) +#define assert_not_equal(tried, expected) \ + assert_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) +#define assert_double_equal(tried, expected) \ + assert_double_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) +#define assert_double_not_equal(tried, expected) \ + assert_double_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) +#define assert_string_equal(tried, expected) \ + assert_string_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) +#define assert_string_not_equal(tried, expected) \ + assert_string_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) -/* Legacy style asserts:*/ +#define assert_true_with_message(result, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, (result), __VA_ARGS__) +#define assert_false_with_message(result, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, ! (result), __VA_ARGS__) +#define assert_equal_with_message(tried, expected, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, ((tried) == (expected)), __VA_ARGS__) +#define assert_not_equal_with_message(tried, expected, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, ((tried) != (expected)), __VA_ARGS__) +#define assert_double_equal_with_message(tried, expected, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) +#define assert_double_not_equal_with_message(tried, expected, ...) 
\ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) +#define assert_string_equal_with_message(tried, expected, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, strings_are_equal((tried), (expected)), __VA_ARGS__) +#define assert_string_not_equal_with_message(tried, expected, ...) \ + (*cgreen::get_test_reporter()->assert_true)(cgreen::get_test_reporter(), FILENAME, __LINE__, !strings_are_equal((tried), (expected)), __VA_ARGS__) +#else +/* Legacy style asserts (for C):*/ #define assert_true(result) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, (result), "[" STRINGIFY_TOKEN(result) "] should be true\n", NULL) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, (result), "[" STRINGIFY_TOKEN(result) "] should be true\n", NULL) #define assert_false(result) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, ! (result), "[" STRINGIFY_TOKEN(result) "] should be false\n", NULL) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, ! 
(result), "[" STRINGIFY_TOKEN(result) "] should be false\n", NULL) #define assert_equal(tried, expected) \ - assert_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) + assert_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) #define assert_not_equal(tried, expected) \ - assert_not_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) + assert_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (intptr_t)(tried), (intptr_t)(expected)) #define assert_double_equal(tried, expected) \ - assert_double_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) + assert_double_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) #define assert_double_not_equal(tried, expected) \ - assert_double_not_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) + assert_double_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) #define assert_string_equal(tried, expected) \ - assert_string_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) + assert_string_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) #define assert_string_not_equal(tried, expected) \ - assert_string_not_equal_(__FILE__, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) + assert_string_not_equal_(FILENAME, __LINE__, STRINGIFY_TOKEN(tried), (tried), (expected)) #define assert_true_with_message(result, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, (result), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, (result), __VA_ARGS__) #define assert_false_with_message(result, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, ! (result), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, ! 
(result), __VA_ARGS__) #define assert_equal_with_message(tried, expected, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, ((tried) == (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, ((tried) == (expected)), __VA_ARGS__) #define assert_not_equal_with_message(tried, expected, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, ((tried) != (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, ((tried) != (expected)), __VA_ARGS__) #define assert_double_equal_with_message(tried, expected, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) #define assert_double_not_equal_with_message(tried, expected, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, doubles_are_equal((tried), (expected)), __VA_ARGS__) #define assert_string_equal_with_message(tried, expected, ...) \ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, strings_are_equal((tried), (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, strings_are_equal((tried), (expected)), __VA_ARGS__) #define assert_string_not_equal_with_message(tried, expected, ...) 
\ - (*get_test_reporter()->assert_true)(get_test_reporter(), __FILE__, __LINE__, !strings_are_equal((tried), (expected)), __VA_ARGS__) + (*get_test_reporter()->assert_true)(get_test_reporter(), FILENAME, __LINE__, !strings_are_equal((tried), (expected)), __VA_ARGS__) +#endif #endif diff -Nru cgreen-1.3.0/include/cgreen/mocks.h cgreen-1.6.3/include/cgreen/mocks.h --- cgreen-1.3.0/include/cgreen/mocks.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/mocks.h 2023-10-03 15:16:52.000000000 +0000 @@ -4,6 +4,7 @@ #include #include #include +#include #include #ifdef __cplusplus @@ -17,20 +18,20 @@ expect(, when(, ), will_return()); */ -#define expect(f, ...) expect_(get_test_reporter(), STRINGIFY_TOKEN(f), __FILE__, __LINE__, \ +#define expect(f, ...) expect_(get_test_reporter(), STRINGIFY_TOKEN(f), FILENAME, __LINE__, \ (Constraint *)__VA_ARGS__ +0, (Constraint *)0) -#define always_expect(f, ...) always_expect_(get_test_reporter(), STRINGIFY_TOKEN(f), __FILE__, __LINE__, \ +#define always_expect(f, ...) always_expect_(get_test_reporter(), STRINGIFY_TOKEN(f), FILENAME, __LINE__, \ (Constraint *)__VA_ARGS__ +0, (Constraint *)0) -#define never_expect(f, ...) never_expect_(get_test_reporter(), STRINGIFY_TOKEN(f), __FILE__, __LINE__, \ +#define never_expect(f, ...) never_expect_(get_test_reporter(), STRINGIFY_TOKEN(f), FILENAME, __LINE__, \ (Constraint *)__VA_ARGS__ +0, (Constraint *)0) #ifdef _MSC_VER // another workaround for fundamental variadic macro deficiencies in Visual C++ 2012 -#define mock(...) PP_NARG(__VA_ARGS__)(get_test_reporter(), __func__, __FILE__, __LINE__, #__VA_ARGS__ "", \ +#define mock(...) PP_NARG(__VA_ARGS__)(get_test_reporter(), __func__, FILENAME, __LINE__, #__VA_ARGS__ "", \ __VA_ARGS__) #else -#define mock(...) PP_NARG(__VA_ARGS__)(get_test_reporter(), __func__, __FILE__, __LINE__, #__VA_ARGS__ "", \ +#define mock(...) 
PP_NARG(__VA_ARGS__)(get_test_reporter(), __func__, FILENAME, __LINE__, #__VA_ARGS__ "", \ __VA_ARGS__ +0) #endif diff -Nru cgreen-1.3.0/include/cgreen/suite.h cgreen-1.6.3/include/cgreen/suite.h --- cgreen-1.3.0/include/cgreen/suite.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/suite.h 2023-10-03 15:16:52.000000000 +0000 @@ -3,6 +3,7 @@ #include "internal/suite_internal.h" +#include #include #include #include @@ -16,8 +17,8 @@ extern "C" { #endif -#define create_test_suite() create_named_test_suite_(__func__, __FILE__, __LINE__) -#define create_named_test_suite(name) create_named_test_suite_(name, __FILE__, __LINE__) +#define create_test_suite() create_named_test_suite_(__func__, FILENAME, __LINE__) +#define create_named_test_suite(name) create_named_test_suite_(name, FILENAME, __LINE__) #define add_test(suite, test) add_test_(suite, STRINGIFY_TOKEN(test), &spec_name(default, test)) #define add_test_with_context(suite, context, test) add_test_(suite, STRINGIFY_TOKEN(test), &spec_name(context, test)) #define add_tests(suite, ...) add_tests_(suite, #__VA_ARGS__, (CgreenTest *)__VA_ARGS__) diff -Nru cgreen-1.3.0/include/cgreen/xml_reporter.h cgreen-1.6.3/include/cgreen/xml_reporter.h --- cgreen-1.3.0/include/cgreen/xml_reporter.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/include/cgreen/xml_reporter.h 2023-10-03 15:16:52.000000000 +0000 @@ -13,6 +13,7 @@ #endif TestReporter *create_xml_reporter(const char *prefix); +TestReporter *create_libxml_reporter(const char *prefix); #ifdef __cplusplus } diff -Nru cgreen-1.3.0/INSTALL cgreen-1.6.3/INSTALL --- cgreen-1.3.0/INSTALL 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/INSTALL 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -Building and Installing Cgreen -============================== - -This document is incomplete. ??? are placeholders. 
- -There are several ways to use Cgreen: - -1) You want to write some tests for a personal project, but don't need to ship - the test suite with the product, or you just want to try out Cgreen. -2) You want to write some tests that will be used by other developers. This - is the normal way to use Cgreen. -3) You want to ship working tests with a product and want to bundle Cgreen - as the test runner. -4) You are pulling Cgreen directly from github: - git clone git@github.com:cgreen-devs/cgreen.git - -We'll walk you through these in order... - -1) So you want to write a test quickly. - -As a user wanting to write some quick tests, the quickest way is to -download the prebuilt binaries for your platform. Just install the -files in some private tree. - -You will see a cgreen.h and both a cgreen shared object or DLL, and a -static library for your system. - -Include the header and link the library as you normally would. - -2) Cgreen needs to be installed to be usable by everyone. - -This is the same as scenario 1, but install the files in a public -/usr/local (or equivalent). The Cgreen header should just be available -through... - -#include - -...and adding -lcgreen should be enough to link under gcc. - -3) ??? - -4) You will have to carry out some additional steps if you work straight off -of a Git clone. - -We are using the CMake (www.cmake.org) build system. The following -steps are called "out-of-source building", because we will build all -the project files outside of the sources directory to completely -retain the original source tree. - -Assuming you have the cloned cgreen sources into a subdirectory -"cgreen" in the current directory we want to create a build directory -parallel to the source directory. - -The default Makefile will create a separate 'build' directory and inside -that subdirectories for C and C++. 
That means that you can just do: - - $ make - $ make test - $ make install - -You also have some options -- build with static libraries -- build HTML or PDF documentation - -To enable any of these use the cmake graphical user interface (CMakeSetup -on Windows or ccmake on UNIX) to turn these options on or off. - -Note on CYGWIN: Cygwin is not a WIN32 platform, but it is a -DLL-platform where libraries normally goes in the bin directory. Running -the self-tests handles this automatically but if you want to use the binaries -in the 'build'-tree you can either: - -1) install before running the tests and also set your PATH to include -"/usr/local/lib" - -2) setting the PATH to include the build directory where the libraries are - there is a sh-compatible command script to do that for you. From the top - of the Cgreen source directory do: - - . cygwin-setup.sh - -/Thomas 2015-12-29 diff -Nru cgreen-1.3.0/INSTALL.md cgreen-1.6.3/INSTALL.md --- cgreen-1.3.0/INSTALL.md 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/INSTALL.md 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,97 @@ +# Building and Installing Cgreen + +Here are your alternatives for installing `Cgreen`. + +## Install using your distro package manager + +Cgreen is available for [some Linux distros] +(https://repology.org/project/cgreen/versions). If you are on one of +those you can do + + $ sudo apt install cgreen1 + +or equivalent. Not all distros have an up-to-date version. + + +## Install pre-built binary + +There are occassionally pre-built binaries available from [the GitHub +repo](https://github.com/cgreen-devs/cgreen/releases). You can +download and install these using an appropriate package manager. + + +## Build from source + +### Get source and build + +Clone the [`Cgreen` repo](https://github.com/cgreen-devs/cgreen) + + $ git clone https://github.com/cgreen-devs/cgreen + +or get the source from the same place. 
+ +Then build it + + $ cd cgreen + $ make + +The Makefile is mainly for convenience as `Cgreen` is actually built +using `CMake`. So you can tweak the build using normal `CMake` +settings. + +You can run some tests using + + $ make unit + $ make test + +NOTE: to also build the dynamically auto-discovering runner +`cgreen-runner`, which `make unit` uses, you need `binutils` as per +the description in the README.md. + +### Build options + +You also have some extra options available + +- build with static libraries +- build HTML or PDF documentation + +To enable any of these use the `CMake` graphical user interface (CMakeSetup +on Windows or ccmake on UNIX) to turn these options on or off. + +Note on CYGWIN: Cygwin is not a WIN32 platform, but it is a +DLL-platform where libraries normally goes in the bin directory. Running +the self-tests handles this automatically but if you want to use the binaries +in the 'build'-tree you can either: + +1) install before running the tests and also set your PATH to include +"/usr/local/lib" + +2) setting the PATH to include the build directory where the libraries are + there is a sh-compatible command script to do that for you. From the top + of the Cgreen source directory do: + + . cygwin-setup.sh + +### Use directly + +You can use `Cgreen` from the source tree without actually +installing. Just set your compilation includes to include +`/include` and link with the built library by pointing +the linker to `/build/src/` and use `-lcgreen`. E.g. + + $ cc ... -I/home/me/cgreen/include ... + $ cc ... -L/home/me/cgreen/build/src -lcgreen ... + +### Install + +You can also install `Cgreen` with + + $ make install + +which will install `Cgreen` in what `CMake` considers standard +locations for your environment. Assuming that is `/usr/local` you +should now be able to compile and link using + + $ cc ... -I/usr/local/include ... + $ cc ... -L/usr/local/lib -lcgreen ... 
+ diff -Nru cgreen-1.3.0/Makefile cgreen-1.6.3/Makefile --- cgreen-1.3.0/Makefile 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/Makefile 2023-10-03 15:16:52.000000000 +0000 @@ -13,35 +13,37 @@ # That should build Cgreen in the build directory, run some tests, # install it locally and generate a distributable package. -all: build/Makefile - cd build; make +ifndef VERBOSE +MAKEFLAGS += --no-print-directory +endif -.PHONY:debug -debug: build - cd build; cmake -DCMAKE_BUILD_TYPE:string=Debug ..; make +.PHONY:all +all: build-it -32bit: build - -rm -rf build; mkdir build; cd build; cmake -DCMAKE_C_FLAGS="-m32" -DCMAKE_CXX_FLAGS="-m32" ..; make +.PHONY:debug +debug: + cmake -DCMAKE_BUILD_TYPE:string=Debug -S . -B build + $(MAKE) -C build .PHONY:test -test: build/Makefile - cd build; make check +test: build-it + cd build; ctest .PHONY:clean clean: build/Makefile - cd build; make clean + $(MAKE) -C build clean .PHONY:package package: build/Makefile - cd build; make package + $(MAKE) -C build package .PHONY:install install: build ifeq ($(OS),Msys) - # Thanks to https://stackoverflow.com/a/46027426/204658 - cd build; make install DESTDIR=/ + # Thanks to https://stackoverflow.com/a/46027426/204658 + $(MAKE) -C build install DESTDIR=/ else - cd build; make install + $(MAKE) -C build install endif # This is kind of a hack to get a quicker and clearer feedback when @@ -66,8 +68,10 @@ PREFIX=cyg SUFFIX=.dll else ifeq ($(OS),Msys) +# This is for Msys "proper" +# TODO: handle Msys/Mingw32/64 LDPATH=PATH=$(PWD)/build/src:"$$PATH" - PREFIX=lib + PREFIX=msys- SUFFIX=.dll else LDPATH=LD_LIBRARY_PATH=$(PWD)/build/src @@ -75,20 +79,29 @@ SUFFIX=.so endif +# Here are +# +# 1. tests linked into a library that we run as is +# 2. 
tests linked into a library that we run and compare its normalized output to expected output + +# TODO: the diff_tools scripts determine prefix and extension by themselves +# Would be better if those were arguments, since we do it here anyway + +# These "diff tools" also normalize the output using replacements DIFF_TOOL=../../tools/cgreen_runner_output_diff XML_DIFF_TOOL=../../tools/cgreen_xml_output_diff DIFF_TOOL_ARGUMENTS = $(1)_tests \ ../../tests \ $(1)_tests.expected +.PHONY: unit unit: build-it - SOURCEDIR=$$PWD/tests/ ; \ cd build ; \ $(LDPATH) tools/cgreen-runner -c `find tests -name $(PREFIX)cgreen_c_tests$(SUFFIX)` ; \ r=$$((r + $$?)) ; \ $(LDPATH) tools/cgreen-runner -c `find tests -name $(PREFIX)cgreen_cpp_tests$(SUFFIX)` ; \ r=$$((r + $$?)) ; \ - $(LDPATH) tools/cgreen-runner -c `find tools/tests -name $(PREFIX)cgreen_runner_tests$(SUFFIX)` ; \ + $(LDPATH) tools/cgreen-runner -c `find tools -name $(PREFIX)cgreen_runner_tests$(SUFFIX)` ; \ r=$$((r + $$?)) ; \ cd tests ; \ $(LDPATH) $(XML_DIFF_TOOL) $(call DIFF_TOOL_ARGUMENTS,xml_output) ; \ @@ -109,10 +122,16 @@ .PHONY: doc doc: build - cd build; cmake -DCGREEN_WITH_HTML_DOCS:bool=TRUE ..; make; cmake -DCGREEN_WITH_HTML_DOCS:bool=False ..; echo open $(PWD)/build/doc/cgreen-guide-en.html + cmake -DCGREEN_WITH_HTML_DOCS:bool=TRUE -S . -B build + cmake --build build + cmake -DCGREEN_WITH_HTML_DOCS:bool=False -S . -B build + echo open $(PWD)/build/doc/cgreen-guide-en.html pdf: build - cd build; cmake -DCGREEN_WITH_PDF_DOCS:bool=TRUE ..; make; cmake -DCGREEN_WITH_PDF_DOCS:bool=False ..; echo open $(PWD)/build/doc/cgreen-guide-en.pdf + cmake -DCGREEN_WITH_PDF_DOCS:bool=TRUE -S . -B build + cmake --build build + cmake -DCGREEN_WITH_PDF_DOCS:bool=FALSE -S . 
-B build + echo open $(PWD)/build/doc/cgreen-guide-en.pdf chunked: doc asciidoctor-chunker build/doc/cgreen-guide-en.html -o docs @@ -120,29 +139,30 @@ .PHONY:valgrind valgrind: build-it - > valgrind.log - for lib in `ls build/tests/$(PREFIX)*_tests$(SUFFIX)` ; \ + @echo -n "Running all tests under Valgrind " + @> valgrind.log + @for lib in `ls build/tests/$(PREFIX)*_tests$(SUFFIX)` ; \ do \ + echo -n "." ; \ LD_LIBRARY_PATH=build/src valgrind --leak-check=full build/tools/cgreen-runner $$lib >> valgrind.log 2>&1 ; \ done - grep " lost:" valgrind.log | grep -v " 0 bytes" | wc -l + @echo + grep --with-filename --line-number " lost: " valgrind.log | grep -v " 0 bytes" ; \ + if [ $$? -eq 1 ] ; then echo "Nothing lost" ; fi ############# Internal - -build-it: build/Makefile - make -C build - -build: - mkdir build - -build/Makefile: build -ifeq ($(OS),Msys) - # Thanks to https://stackoverflow.com/a/46027426/204658 - cd build; cmake -G"MSYS Makefiles" -DCMAKE_INSTALL_PREFIX=/usr/local .. +build build/Makefile: +ifeq ($(OS),Darwin) + cmake -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" -S . -B build + #cmake -S . -B build else - cd build; cmake .. + cmake -S . 
-B build endif +.PHONY:build-it +build-it: build + $(MAKE) -C build + .SILENT: diff -Nru cgreen-1.3.0/README.md cgreen-1.6.3/README.md --- cgreen-1.3.0/README.md 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/README.md 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,5 @@ -[![Build Status](https://travis-ci.org/cgreen-devs/cgreen.svg?branch=master)](https://travis-ci.org/cgreen-devs/cgreen) +[![Build Status](https://app.travis-ci.com/cgreen-devs/cgreen.svg?branch=master)](https://app.travis-ci.com/github/cgreen-devs/cgreen) [![Coverage Status](https://coveralls.io/repos/cgreen-devs/cgreen/badge.svg?branch=master&service=github)](https://coveralls.io/github/cgreen-devs/cgreen?branch=master) -[![Chat on Gitter](https://img.shields.io/gitter/room/badges/shields.svg)](https://gitter.im/cgreen-devs/chat) ![](https://github.com/cgreen-devs/cgreen/blob/master/doc/logo.png?s=300) @@ -25,13 +24,13 @@ Then *Cgreen* is the thing for you! **TLDR;** The full tutorial is on -[github.io](https://cgreen-devs.github.io). Or have a look at the -[cheat sheet](doc/cheat-sheet.md). +[github.io](https://cgreen-devs.github.io/cgreen/cgreen-guide-en.html). +Or have a look at the [cheat sheet](https://cgreen-devs.github.io/cgreen/cheat-sheet.html). ## What It Is -Cgreen is a modern unit test and mocking framework for C and C++. Here -are some of Cgreens unique selling points: +Cgreen is a modern unit test and mocking framework for C and C++. +Here are some of Cgreens unique selling points: - fast build, clean code, highly portable - auto-discovery of tests without the abuse of static initializers or globals @@ -52,24 +51,24 @@ ## Getting It -Cgreen is hosted on GitHub. As of now there are no pre-built packages -to download, so you have to clone the repository or download the -source zip from [GitHub](http://www.github.com/cgreen-devs/cgreen) and -build it yourself. +Cgreen is hosted on [GitHub](https://github.com/cgreen-devs/cgreen). 
+As of now there are no pre-built packages to download, but Cgreen is available in [Debian, Fedora and some other package repositories](https://repology.org/project/cgreen/versions), although some are lagging. -We are hoping to get Cgreen into Debian and Fedora repositories, but in -the mean time there are some packaging scripts available, not all official: +There are also some other packaging scripts available, not all official: - - [debian packaging is ongoing in #208](https://github.com/cgreen-devs/cgreen/issues/208) - - [fedora/rpm packaging is part of cgreens source tree](https://github.com/cgreen-devs/cgreen/tree/master/contrib/rpm) and actual Fedora repo work is ongoing in [#225](https://github.com/cgreen-devs/cgreen/issues/225) - [PACMAN script](https://github.com/voins/cgreen-pkg) - [MacOS packagesbuild](https://github.com/cgreen-devs/cgreen-macosx-packaging) - [Cygwin package script](https://github.com/cgreen-devs/cgreen-cygport) +You can also clone the repository or download the source zip from [GitHub](http://www.github.com/cgreen-devs/cgreen) and build it yourself. + ## Building It -You need the [CMake](http://www.cmake.org) build system. Most standard -C/C++ compilers should work. GCC definitely does. +You need the [CMake](http://www.cmake.org) build system. +Most standard C/C++ compilers should work. GCC definitely does. + +Perl, diff, find and sed are required to run Cgreen's own +unit-tests. Most distro will have those already installed. In the root directory run ``make``. That will configure and build the library and the `cgreen-runner`, both supporting both C and C++. See @@ -79,7 +78,7 @@ Tests are fairly easy write, as shown by the examples in the beginning of this readme. You should probably read the -[tutorial](https://cgreen-devs.github.io) once before writing your +[tutorial](https://cgreen-devs.github.io/cgreen/cgreen-guide-en.html) once before writing your first test, though. 
Basically you can run your tests in two ways @@ -109,7 +108,7 @@ ## Reading Up! You can read the extensive tutorial directly on -[GitHub](https://cgreen-devs.github.io). +[GitHub](https://cgreen-devs.github.io/cgreen/cgreen-guide-en.html). There is a [cheat sheet](https://github.com/cgreen-devs/cgreen/blob/master/doc/cheat-sheet.md) available. @@ -119,7 +118,7 @@ configuration. Of course you need [Asciidoctor](http://www.asciidoctor.org). - make docs + make doc make pdf (Generating PDF also requires [asciidoctor-pdf](https://asciidoctor.org/docs/asciidoctor-pdf/).) @@ -144,8 +143,7 @@ is normally run in it's own process. This project is very close in scope to the "Check" unit tester and -was influenced by it... -http://check.sourceforge.net/projects/check/ +was initially influenced by it. The main difference from this tool and other xUnit tools, such as "Check", is that test results are not stored. Instead they are diff -Nru cgreen-1.3.0/setup.sh cgreen-1.6.3/setup.sh --- cgreen-1.3.0/setup.sh 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/setup.sh 2023-10-03 15:16:52.000000000 +0000 @@ -39,6 +39,9 @@ # On linux we need LD_LIBRARY_PATH... export LD_LIBRARY_PATH="$PWD/build/src":$PATH + # MacOS.. + export DYLD_LIBRARY_PATH="$PWD/build/src":$PATH + # ...but on Cygwin DLL:s are searched using the path... 
export PATH="$PWD/build/src":$PATH fi diff -Nru cgreen-1.3.0/src/assertions.c cgreen-1.6.3/src/assertions.c --- cgreen-1.3.0/src/assertions.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/assertions.c 2023-10-03 15:16:52.000000000 +0000 @@ -40,7 +40,7 @@ return; } - if (constraint->type == DOUBLE_COMPARER) { + if (constraint->type == CGREEN_DOUBLE_COMPARER_CONSTRAINT) { (*get_test_reporter()->assert_true)( get_test_reporter(), file, @@ -98,7 +98,7 @@ return; } - if (constraint->type != DOUBLE_COMPARER) { + if (constraint->type != CGREEN_DOUBLE_COMPARER_CONSTRAINT) { (*get_test_reporter()->assert_true)( get_test_reporter(), file, diff -Nru cgreen-1.3.0/src/CMakeLists.txt cgreen-1.6.3/src/CMakeLists.txt --- cgreen-1.3.0/src/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -1,27 +1,23 @@ -# Generate gitrevision.h if Git is available and the .git directory is found. -# NOTE: $GITDIR is determined in top-level CMakeLists.txt -if (GITDIR AND GIT_EXECUTABLE) - # Create gitrevision.h - add_custom_command(OUTPUT ${PROJECT_SOURCE_DIR}/gitrevision.h - COMMAND ${CMAKE_COMMAND} -E echo_append "#define GITREVISION \"" > ${PROJECT_SOURCE_DIR}/gitrevision.h.tmp - COMMAND ${GIT_EXECUTABLE} describe --tags --always --abbrev=7 --dirty=-modified >> ${PROJECT_SOURCE_DIR}/gitrevision.h.tmp - COMMAND ${CMAKE_COMMAND} -E echo_append "\"" >> ${PROJECT_SOURCE_DIR}/gitrevision.h.tmp - COMMAND sed -e N -e "s/\\n//g" ${PROJECT_SOURCE_DIR}/gitrevision.h.tmp > ${PROJECT_SOURCE_DIR}/gitrevision.h - COMMAND ${CMAKE_COMMAND} -E remove -f ${PROJECT_SOURCE_DIR}/gitrevision.h.tmp - DEPENDS ${GITDIR} - VERBATIM - COMMENT "-- Generating gitrevision.h" - ) +# Generate gitrevision.h +if (GITREVISION AND GIT_EXECUTABLE) + # Allow GITREVISION to be overridden manually / externally. This useful for vendoring. + message(INFO, "GITREVISION manually overridden: ${GITREVISION}") else() - # No version control - # e.g. 
when the software is built from a source tarball - message(WARNING "-- Unable to find git. Setting git revision to 'unknown'.") - add_custom_command(OUTPUT ${PROJECT_SOURCE_DIR}/gitrevision.h - COMMAND ${CMAKE_COMMAND} -E echo "#define GITREVISION \"unknown\"" > ${PROJECT_SOURCE_DIR}/gitrevision.h - VERBATIM - ) + if (GITDIR AND GIT_EXECUTABLE) + # Use the git version if Git is available and the .git directory is found. + # NOTE: $GITDIR is determined in top-level CMakeLists.txt + execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --always --abbrev=7 --dirty=-modified + OUTPUT_VARIABLE GITREVISION + OUTPUT_STRIP_TRAILING_WHITESPACE) + else() + # No version control + # e.g. when the software is built from a source tarball + message(WARNING "-- Unable to find git, or not a repo. Setting git revision to 'unknown'.") + set(GITREVISION "built from unknown git commit") + endif() endif() +configure_file(${PROJECT_SOURCE_DIR}/gitrevision.h.in ${PROJECT_SOURCE_DIR}/gitrevision.h @ONLY) set(CGREEN_PUBLIC_INCLUDE_DIRS ${PROJECT_SOURCE_DIR}/ @@ -52,63 +48,72 @@ endif(WIN32 AND NOT CYGWIN) set(cgreen_SRCS - assertions.c - boxed_double.c - breadcrumb.c - cgreen_time.c - cgreen_value.c - constraint.c - constraint_syntax_helpers.c - cute_reporter.c - cdash_reporter.c - messaging.c - message_formatting.c - mocks.c - parameters.c - reporter.c - runner.c - string_comparison.c - suite.c - text_reporter.c - utils.c - vector.c - xml_reporter.c -) + ${CMAKE_CURRENT_SOURCE_DIR}/assertions.c + ${CMAKE_CURRENT_SOURCE_DIR}/boxed_double.c + ${CMAKE_CURRENT_SOURCE_DIR}/breadcrumb.c + ${CMAKE_CURRENT_SOURCE_DIR}/cgreen_time.c + ${CMAKE_CURRENT_SOURCE_DIR}/cgreen_value.c + ${CMAKE_CURRENT_SOURCE_DIR}/constraint.c + ${CMAKE_CURRENT_SOURCE_DIR}/cute_reporter.c + ${CMAKE_CURRENT_SOURCE_DIR}/cdash_reporter.c + ${CMAKE_CURRENT_SOURCE_DIR}/messaging.c + ${CMAKE_CURRENT_SOURCE_DIR}/message_formatting.c + ${CMAKE_CURRENT_SOURCE_DIR}/mocks.c + ${CMAKE_CURRENT_SOURCE_DIR}/parameters.c + 
${CMAKE_CURRENT_SOURCE_DIR}/reporter.c + ${CMAKE_CURRENT_SOURCE_DIR}/runner.c + ${CMAKE_CURRENT_SOURCE_DIR}/string_comparison.c + ${CMAKE_CURRENT_SOURCE_DIR}/suite.c + ${CMAKE_CURRENT_SOURCE_DIR}/text_reporter.c + ${CMAKE_CURRENT_SOURCE_DIR}/utils.c + ${CMAKE_CURRENT_SOURCE_DIR}/vector.c +) +if (CGREEN_WITH_XML) + LIST(APPEND cgreen_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/xml_reporter.c) +endif (CGREEN_WITH_XML) +if (CGREEN_WITH_LIBXML2) + LIST(APPEND cgreen_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/libxml_reporter.c) +endif (CGREEN_WITH_LIBXML2) if (MSYS) # Msys2 is difficult since it really is three different "OS":es, Msys native, W32 and W64 # To get somewhere, let's use the native Msys2, which actually is Cygwin/UNIX. LIST(APPEND cgreen_SRCS - posix_cgreen_pipe.c - posix_cgreen_time.c - posix_runner_platform.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_cgreen_pipe.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_cgreen_time.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_runner_platform.c ) elseif (UNIX OR CYGWIN) LIST(APPEND cgreen_SRCS - posix_cgreen_pipe.c - posix_cgreen_time.c - posix_runner_platform.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_cgreen_pipe.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_cgreen_time.c + ${CMAKE_CURRENT_SOURCE_DIR}/posix_runner_platform.c ) elseif(WIN32) LIST(APPEND cgreen_SRCS - win32_cgreen_pipe.c - win32_cgreen_time.c - win32_runner_platform.c + ${CMAKE_CURRENT_SOURCE_DIR}/win32_cgreen_pipe.c + ${CMAKE_CURRENT_SOURCE_DIR}/win32_cgreen_time.c + ${CMAKE_CURRENT_SOURCE_DIR}/win32_runner_platform.c ) else() - message(FATAL_ERROR "Cgreen can currently only be compiled for Msys2, Cygwin, MacOSX and Windows.") + message(FATAL_ERROR "Cgreen can currently only be compiled for Linux, Cygwin, MacOSX and Msys2 (native, not MingW versions). 
Patches are welcome!") endif() SET_SOURCE_FILES_PROPERTIES(${cgreen_SRCS} PROPERTIES LANGUAGE C) set(cgreen_SRCS ${cgreen_SRCS} - cpp_assertions.cpp - cpp_constraint.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/cpp_assertions.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/cpp_constraint.cpp ) SET_SOURCE_FILES_PROPERTIES(cpp_assertions.cpp cpp_constraint.cpp PROPERTIES LANGUAGE CXX ) + +include(DefineRelativeFilePaths) +cmake_define_relative_file_paths ("${cgreen_SRCS}") + include_directories( ${CGREEN_PUBLIC_INCLUDE_DIRS} ${CGREEN_PRIVATE_INCLUDE_DIRS} + $<$:${LIBXML2_INCLUDE_DIRS}> ) ### cgreen @@ -121,6 +126,7 @@ ${CMAKE_THREAD_LIBS_INIT} ${MATH_LIB} ${CMAKE_CXX_IMPLICIT_LINK_LIBRARIES} + $<$:${LIBXML2_LIBRARIES}> ) set_target_properties( diff -Nru cgreen-1.3.0/src/constraint.c cgreen-1.6.3/src/constraint.c --- cgreen-1.3.0/src/constraint.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/constraint.c 2023-10-03 15:16:52.000000000 +0000 @@ -80,14 +80,15 @@ static void set_contents(Constraint *constraint, const char *function, CgreenValue actual, const char *test_file, int test_line, TestReporter *reporter); +static void execute_sideeffect(Constraint *constraint, const char *function, CgreenValue actual, + const char *test_file, int test_line, TestReporter *reporter); +static void capture_parameter(Constraint *constraint, const char *function, CgreenValue actual, + const char *test_file, int test_line, TestReporter *reporter); static const char *default_actual_value_message = "\n\t\tactual value:\t\t\t[%" PRIdPTR "]"; static const char *default_expected_value_message = "\t\texpected value:\t\t\t[%" PRIdPTR "]"; -static void execute_sideeffect(Constraint *constraint, const char *function, CgreenValue actual, - const char *test_file, int test_line, TestReporter *reporter); - -Constraint *create_constraint() { +Constraint *create_constraint(void) { Constraint *constraint = (Constraint *)malloc(sizeof(Constraint)); /* TODO: setting this to NULL as an implicit type check :( */ 
constraint->parameter_name = NULL; @@ -144,6 +145,7 @@ } bool constraint_is_not_for_parameter(const Constraint *constraint, const char *parameter) { + /* TODO: This should not test for types of constraints... Or it actually checks for something else ... */ if (is_not_comparing(constraint) && is_not_content_setting(constraint)) { return true; } @@ -151,9 +153,61 @@ return strcmp(constraint->parameter_name, parameter) != 0; } +Constraint *create_not_null_constraint(void) { + Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(0), "null"); + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; + + constraint->name = "be non null"; + constraint->compare = &compare_do_not_want_value; + constraint->execute = &test_want; + constraint->actual_value_message = ""; + constraint->expected_value_message = ""; + + return constraint; +} + +Constraint *create_is_null_constraint(void) { + Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(0), "null"); + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; + + constraint->name = "be null"; + constraint->compare = &compare_want_value; + constraint->execute = &test_want; + constraint->actual_value_message = ""; + constraint->expected_value_message = ""; + + return constraint; +} + +Constraint *create_is_false_constraint(void) { + Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(false), "false"); + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; + + constraint->name = "be false"; + constraint->compare = &compare_want_value; + constraint->execute = &test_want; + constraint->actual_value_message = ""; + constraint->expected_value_message = ""; + + return constraint; +} + +Constraint *create_is_true_constraint(void) { + Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(false), "true"); + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; + + constraint->name = "be true"; + constraint->compare = 
&compare_do_not_want_value; + constraint->execute = &test_want; + constraint->actual_value_message = ""; + constraint->expected_value_message = ""; + + return constraint; +} + Constraint *create_equal_to_value_constraint(intptr_t expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(expected_value), expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_value; constraint->execute = &test_want; @@ -165,7 +219,7 @@ Constraint *create_equal_to_hexvalue_constraint(intptr_t expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(expected_value), expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_value; constraint->execute = &test_want; @@ -179,7 +233,7 @@ Constraint *create_not_equal_to_value_constraint(intptr_t expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(expected_value), expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_value; constraint->execute = &test_want; @@ -191,7 +245,7 @@ Constraint *create_less_than_value_constraint(intptr_t expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_integer_value(expected_value), expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_lesser_value; constraint->execute = &test_true; @@ -204,7 +258,7 @@ Constraint *create_greater_than_value_constraint(intptr_t expected_value, const char *expected_value_name) { Constraint *constraint = 
create_constraint_expecting(make_cgreen_integer_value(expected_value), expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_greater_value; constraint->execute = &test_true; @@ -217,7 +271,7 @@ Constraint *create_equal_to_contents_constraint(void *pointer_to_compare, size_t size_to_compare, const char *compared_pointer_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_pointer_value(pointer_to_compare), compared_pointer_name); - constraint->type = CONTENT_COMPARER; + constraint->type = CGREEN_CONTENT_COMPARER_CONSTRAINT; constraint->compare = &compare_want_contents; constraint->execute = &test_want; @@ -229,7 +283,7 @@ Constraint *create_not_equal_to_contents_constraint(void *pointer_to_compare, size_t size_to_compare, const char *compared_pointer_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_pointer_value(pointer_to_compare), compared_pointer_name); - constraint->type = CONTENT_COMPARER; + constraint->type = CGREEN_CONTENT_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_contents; constraint->execute = &test_want; @@ -241,7 +295,7 @@ Constraint *create_equal_to_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_want_string; constraint->execute = &test_want; @@ -254,7 +308,7 @@ Constraint *create_not_equal_to_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = 
&compare_do_not_want_string; constraint->execute = &test_want; @@ -267,7 +321,7 @@ Constraint *create_contains_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_want_substring; constraint->execute = &test_want; @@ -280,7 +334,7 @@ Constraint *create_does_not_contain_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_substring; constraint->execute = &test_want; @@ -293,7 +347,7 @@ Constraint *create_begins_with_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_want_beginning_of_string; constraint->execute = &test_want; @@ -306,7 +360,7 @@ Constraint *create_does_not_begin_with_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_beginning_of_string; constraint->execute = &test_want; @@ -319,7 +373,7 @@ Constraint *create_ends_with_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = 
create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_want_end_of_string; constraint->execute = &test_want; @@ -332,7 +386,7 @@ Constraint *create_does_not_end_with_string_constraint(const char* expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_string_value(expected_value), expected_value_name); - constraint->type = STRING_COMPARER; + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_end_of_string; constraint->execute = &test_want; @@ -345,7 +399,7 @@ Constraint *create_equal_to_double_constraint(double expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_double_value(expected_value), expected_value_name); - constraint->type = DOUBLE_COMPARER; + constraint->type = CGREEN_DOUBLE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_double; constraint->execute = &test_want_double; @@ -357,7 +411,7 @@ Constraint *create_not_equal_to_double_constraint(double expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_double_value(expected_value), expected_value_name); - constraint->type = DOUBLE_COMPARER; + constraint->type = CGREEN_DOUBLE_COMPARER_CONSTRAINT; constraint->compare = &compare_do_not_want_double; constraint->execute = &test_do_not_want_double; @@ -369,7 +423,7 @@ Constraint *create_less_than_double_constraint(double expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_double_value(expected_value), expected_value_name); - constraint->type = DOUBLE_COMPARER; + constraint->type = CGREEN_DOUBLE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_lesser_double; constraint->execute = &test_true; @@ 
-382,7 +436,7 @@ Constraint *create_greater_than_double_constraint(double expected_value, const char *expected_value_name) { Constraint *constraint = create_constraint_expecting(make_cgreen_double_value(expected_value), expected_value_name); - constraint->type = DOUBLE_COMPARER; + constraint->type = CGREEN_DOUBLE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_greater_double; constraint->execute = &test_true; @@ -395,7 +449,7 @@ Constraint *create_return_value_constraint(intptr_t value_to_return) { Constraint* constraint = create_constraint(); - constraint->type = RETURN_VALUE; + constraint->type = CGREEN_RETURN_VALUE_CONSTRAINT; constraint->compare = &compare_true; constraint->execute = &test_true; @@ -409,19 +463,20 @@ intptr_t actual_return = (intptr_t) malloc(size); memcpy((void*)actual_return, (void*)value_to_return, size); Constraint* constraint = create_constraint(); - constraint->type = RETURN_VALUE; + constraint->type = CGREEN_RETURN_BY_VALUE_CONSTRAINT; constraint->compare = &compare_true; constraint->execute = &test_true; - constraint->name = "return value"; + constraint->name = "return by value"; constraint->expected_value = make_cgreen_by_value((void*)actual_return, size); + constraint->destroy = &destroy_by_value_constraint; return constraint; } Constraint *create_return_double_value_constraint(double value_to_return) { Constraint* constraint = create_constraint(); - constraint->type = RETURN_VALUE; + constraint->type = CGREEN_RETURN_VALUE_CONSTRAINT; constraint->compare = &compare_true; constraint->execute = &test_true; @@ -433,7 +488,7 @@ Constraint *create_set_parameter_value_constraint(const char *parameter_name, intptr_t value_to_set, size_t size_to_set) { Constraint* constraint = create_constraint(); - constraint->type = CONTENT_SETTER; + constraint->type = CGREEN_CONTENT_SETTER_CONSTRAINT; constraint->compare = &compare_true; constraint->execute = &set_contents; @@ -447,7 +502,7 @@ Constraint *create_with_side_effect_constraint(void 
(*callback)(void *), void *data) { Constraint* constraint = create_constraint(); - constraint->type = CALL; + constraint->type = CGREEN_CALL_CONSTRAINT; constraint->name = "cause side effect"; constraint->side_effect_callback = callback; @@ -457,6 +512,20 @@ return constraint; } +Constraint *create_capture_parameter_constraint(const char *parameter_name, void *capture_to, size_t size_to_capture) { + Constraint* constraint = create_constraint(); + constraint->type = CGREEN_CAPTURE_PARAMETER_CONSTRAINT; + + constraint->compare = &compare_true; + constraint->execute = &capture_parameter; + constraint->name = "capture parameter"; + constraint->expected_value = make_cgreen_pointer_value(capture_to); + constraint->size_of_expected_value = size_to_capture; + constraint->parameter_name = parameter_name; + + return constraint; +} + bool compare_want_value(Constraint *constraint, CgreenValue actual) { return constraint->expected_value.value.integer_value == actual.value.integer_value; } @@ -502,7 +571,6 @@ /* TODO: should propagate the whole actual */ if (parameters_are_not_valid_for(constraint, actual.value.integer_value)) { message = validation_failure_message_for(constraint, actual.value.integer_value); - (*reporter->assert_true)( reporter, test_file, @@ -514,7 +582,7 @@ return; } - memmove((void *)actual.value.integer_value, constraint->expected_value.value.pointer_value, constraint->size_of_expected_value); + memmove((void *)actual.value.pointer_value, constraint->expected_value.value.pointer_value, constraint->size_of_expected_value); } static void execute_sideeffect(Constraint *constraint, const char *function, CgreenValue actual, @@ -523,7 +591,6 @@ (void)actual; if (constraint->side_effect_callback == NULL) { - (*reporter->assert_true)( reporter, test_file, @@ -534,6 +601,28 @@ (constraint->side_effect_callback)(constraint->side_effect_data); } +#define IS_BIG_ENDIAN (!*(unsigned char *)&(uint16_t){1}) +static bool bigendian(void) { return IS_BIG_ENDIAN; } + +static 
void capture_parameter(Constraint *constraint, const char *function, CgreenValue actual, + const char *test_file, int test_line, TestReporter *reporter) { + (void)function; + (void)test_file; + (void)test_line; + (void)reporter; + + if ((sizeof(intptr_t) != constraint->size_of_expected_value) && bigendian()) { + // Then the beginning of a smaller value is not stored at the beginning of the actual.value union + size_t offset = sizeof(intptr_t) - constraint->size_of_expected_value; + // Offset is in bytes so we need to cast &actual.value to that before adding the offset + void *start_address = (unsigned char *)&actual.value + offset; + memmove(constraint->expected_value.value.pointer_value, start_address, constraint->size_of_expected_value); + + } else + memmove(constraint->expected_value.value.pointer_value, &actual.value, + constraint->size_of_expected_value); + } + void test_want(Constraint *constraint, const char *function, CgreenValue actual, const char *test_file, int test_line, TestReporter *reporter) { @@ -606,13 +695,15 @@ } static bool compare_want_end_of_string(Constraint *constraint, CgreenValue actual) { - return strpos(actual.value.string_value, constraint->expected_value.value.string_value) == - strlen(actual.value.string_value) - strlen(constraint->expected_value.value.string_value); + int match_length = strlen(constraint->expected_value.value.string_value); + int start_position = strlen(actual.value.string_value) - match_length; + if (start_position < 0) + return false; + return strcmp(&actual.value.string_value[start_position], constraint->expected_value.value.string_value) == 0; } static bool compare_do_not_want_end_of_string(Constraint *constraint, CgreenValue actual) { - return strpos(actual.value.string_value, constraint->expected_value.value.string_value) != - strlen(actual.value.string_value) - strlen(constraint->expected_value.value.string_value); + return !compare_want_end_of_string(constraint, actual); } @@ -672,6 +763,13 @@ 
destroy_empty_constraint(constraint); } +void destroy_by_value_constraint(Constraint *constraint) { + // TODO: we should return the allocated area, but that conflicts with reporter printing it + // Now, how does the string constraints do it? + destroy_cgreen_value(constraint->expected_value); + destroy_empty_constraint(constraint); +} + static bool compare_true(Constraint *constraint, CgreenValue actual) { (void)constraint; (void)actual; @@ -699,11 +797,11 @@ } bool is_content_comparing(const Constraint *constraint) { - return constraint->type == CONTENT_COMPARER; + return constraint->type == CGREEN_CONTENT_COMPARER_CONSTRAINT; } bool is_content_setting(const Constraint *constraint) { - return constraint->type == CONTENT_SETTER; + return constraint->type == CGREEN_CONTENT_SETTER_CONSTRAINT; } bool is_not_content_setting(const Constraint *constraint) { @@ -711,18 +809,19 @@ } bool is_string_comparing(const Constraint *constraint) { - return constraint->type == STRING_COMPARER; + return constraint->type == CGREEN_STRING_COMPARER_CONSTRAINT; } bool is_double_comparing(const Constraint *constraint) { - return constraint->type == DOUBLE_COMPARER; + return constraint->type == CGREEN_DOUBLE_COMPARER_CONSTRAINT; } bool is_comparing(const Constraint *constraint) { return is_string_comparing(constraint) || - is_content_comparing(constraint) || - is_double_comparing(constraint) || - constraint->type == VALUE_COMPARER; + is_content_comparing(constraint) || + is_double_comparing(constraint) || + constraint->type == CGREEN_VALUE_COMPARER_CONSTRAINT || + constraint->type == CGREEN_CAPTURE_PARAMETER_CONSTRAINT; } bool is_not_comparing(const Constraint *constraint) { @@ -775,7 +874,7 @@ significant_figures = figures; } -int get_significant_figures() { +int get_significant_figures(void) { return significant_figures; } diff -Nru cgreen-1.3.0/src/constraint_internal.h cgreen-1.6.3/src/constraint_internal.h --- cgreen-1.3.0/src/constraint_internal.h 2020-06-04 18:00:10.000000000 +0000 
+++ cgreen-1.6.3/src/constraint_internal.h 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,8 @@ #ifndef CONSTRAINT_INTERNAL_H #define CONSTRAINT_INTERNAL_H +#include + /* constraints internal functions are used from some user level tests so must be compilable in C++ */ #ifdef __cplusplus namespace cgreen { @@ -11,6 +13,7 @@ extern void destroy_static_constraint(Constraint *constraint); extern void destroy_string_constraint(Constraint *constraint); extern void destroy_double_constraint(Constraint *constraint); +extern void destroy_by_value_constraint(Constraint *constraint); extern void destroy_constraint(Constraint *); extern void destroy_constraints(va_list constraints); diff -Nru cgreen-1.3.0/src/constraint_syntax_helpers.c cgreen-1.6.3/src/constraint_syntax_helpers.c --- cgreen-1.3.0/src/constraint_syntax_helpers.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/constraint_syntax_helpers.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -#include -#include -#include "constraint_internal.h" -#include -#include -#include - - -Constraint static_is_non_null_constraint = { - /* .type */ VALUE_COMPARER, - /* .name */ "be non null", - /* .destroy */ destroy_static_constraint, - /* .compare */ compare_do_not_want_value, - /* .test */ test_want, - /* .format_failure_message_for */ failure_message_for, - /* .actual_value_message */ "", - /* .expected_value_message */ "", - /* .expected_value */ {CGREEN_INTEGER, {0}, 0}, - /* .stored_value_name */ "null", - /* .parameter_name */ NULL, - /* .size_of_stored_value */ 0, - /* .side_effect_callback */ NULL, - /* .side_effect_data */ NULL -}; - -Constraint static_is_null_constraint = { - /* .type */ VALUE_COMPARER, - /* .name */ "be null", - /* .destroy */ destroy_static_constraint, - /* .compare */ compare_want_value, - /* .test */ test_want, - /* .format_failure_message_for */ failure_message_for, - /* .actual_value_message */ "", - /* .expected_value_message */ "", - /* .expected_value */ {CGREEN_INTEGER, 
{(intptr_t)NULL}, 0}, - /* .stored_value_name */ "null", - /* .parameter_name */ NULL, - /* .size_of_stored_value */ 0, - /* .side_effect_callback */ NULL, - /* .side_effect_data */ NULL -}; - -Constraint static_is_false_constraint = { - /* .type */ VALUE_COMPARER, - /* .name */ "be false", - /* .destroy */ destroy_static_constraint, - /* .compare */ compare_want_value, - /* .test */ test_want, - /* .format_failure_message_for */ failure_message_for, - /* .actual_value_message */ "", - /* .expected_value_message */ "", - /* .expected_value */ {CGREEN_INTEGER, {false}, 0}, - /* .stored_value_name */ "false", - /* .parameter_name */ NULL, - /* .size_of_stored_value */ 0, - /* .side_effect_callback */ NULL, - /* .side_effect_data */ NULL -}; - -Constraint static_is_true_constraint = { - /* .type */ VALUE_COMPARER, - /* .name */ "be true", - /* .destroy */ destroy_static_constraint, - /* .compare */ compare_do_not_want_value, - /* .test */ test_want, - /* .format_failure_message_for */ failure_message_for, - /* .actual_value_message */ "", - /* .expected_value_message */ "", - /* .expected_value */ {CGREEN_INTEGER, {false}, 0}, - /* .stored_value_name */ "true", - /* .parameter_name */ NULL, - /* .size_of_stored_value */ 0, - /* .side_effect_callback */ NULL, - /* .side_effect_data */ NULL -}; - -Constraint *is_non_null = &static_is_non_null_constraint; -Constraint *is_null = &static_is_null_constraint; -Constraint *is_false = &static_is_false_constraint; -Constraint *is_true = &static_is_true_constraint; - -/* vim: set ts=4 sw=4 et cindent: */ diff -Nru cgreen-1.3.0/src/cute_reporter.c cgreen-1.6.3/src/cute_reporter.c --- cgreen-1.3.0/src/cute_reporter.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/cute_reporter.c 2023-10-03 15:16:52.000000000 +0000 @@ -58,7 +58,7 @@ return NULL; } reporter->memo = memo; - + set_cute_reporter_printer(reporter, printf); set_cute_reporter_vprinter(reporter, vprintf); @@ -101,14 +101,14 @@ reporter_finish_test(reporter, 
filename, line, message); if (memo->error_count == reporter->failures + reporter->exceptions) { - memo->printer("#success %s, %d ms OK\n", name); + memo->printer("#success %s OK\n", name); } } static void cute_finish_suite(TestReporter *reporter, const char *filename, int line) { const char *name = get_current_from_breadcrumb((CgreenBreadcrumb *)reporter->breadcrumb); CuteMemo *memo = (CuteMemo *)reporter->memo; - + reporter_finish_test(reporter, filename, line, NULL); reporter->total_passes += reporter->passes; @@ -119,9 +119,9 @@ memo->printer("#ending %s", name); if (get_breadcrumb_depth((CgreenBreadcrumb *) reporter->breadcrumb) == 0) { memo->printer(": %d pass%s, %d failure%s, %d exception%s, %d ms.\n", - reporter->passes, reporter->passes == 1 ? "" : "es", - reporter->failures, reporter->failures == 1 ? "" : "s", - reporter->exceptions, reporter->exceptions == 1 ? "" : "s", + reporter->total_passes, reporter->total_passes == 1 ? "" : "es", + reporter->total_failures, reporter->total_failures == 1 ? "" : "s", + reporter->total_exceptions, reporter->total_exceptions == 1 ? 
"" : "s", reporter->total_duration); } else memo->printer("\n"); diff -Nru cgreen-1.3.0/src/libxml_reporter.c cgreen-1.6.3/src/libxml_reporter.c --- cgreen-1.3.0/src/libxml_reporter.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/src/libxml_reporter.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,490 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "libxml_reporter_internal.h" + +#ifdef __ANDROID__ +#include "cgreen/internal/android_headers/androidcompat.h" +#endif // #ifdef __ANDROID__ + +typedef struct { + XmlPrinter *printer; + int segment_count; +} XmlMemo; + +#define XMLSTRING(x) (BAD_CAST x) + +static void xml_reporter_start_suite(TestReporter *reporter, const char *name, + int count); +static void xml_reporter_start_test(TestReporter *reporter, const char *name); +static void xml_reporter_finish_test(TestReporter *reporter, const char *filename, + int line, const char *message); +static void xml_reporter_finish_suite(TestReporter *reporter, const char *filename, + int line); +static void xml_show_skip(TestReporter *reporter, const char *file, int line); +static void xml_show_fail(TestReporter *reporter, const char *file, int line, + const char *message, va_list arguments); +static void xml_show_incomplete(TestReporter *reporter, const char *filename, + int line, const char *message, va_list arguments); + +static const char *file_prefix; + +static int default_printer(xmlDocPtr doc); + +void set_libxml_reporter_printer(TestReporter *reporter, XmlPrinter *printer) +{ + XmlMemo *memo = (XmlMemo*)reporter->memo; + memo->printer = printer; +} + +TestReporter *create_libxml_reporter(const char *prefix) { + TestReporter *reporter; + XmlMemo *memo; + + reporter = create_reporter(); + if (reporter == NULL) { + return NULL; + } + + memo = (XmlMemo *) malloc(sizeof(XmlMemo)); + if (memo == NULL) { + destroy_reporter(reporter); + return NULL; + } + reporter->memo = memo; + 
memo->printer = &default_printer; + + file_prefix = prefix; + reporter->start_suite = &xml_reporter_start_suite; + reporter->start_test = &xml_reporter_start_test; + reporter->show_fail = &xml_show_fail; + reporter->show_skip = &xml_show_skip; + reporter->show_incomplete = &xml_show_incomplete; + reporter->finish_test = &xml_reporter_finish_test; + reporter->finish_suite = &xml_reporter_finish_suite; + return reporter; +} + +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif +static char suite_path[PATH_MAX]; + +static void print_path_separator_if_needed(int *more_segments) { + if (*more_segments > 0) { + strcat(suite_path, "/"); + (*more_segments)--; + } +} + +static void print_path_segment_walker(const char *segment, void *void_memo) { + XmlMemo *memo = (XmlMemo *)void_memo; + + strncat(suite_path, segment, sizeof(suite_path)-strlen(suite_path)-1); + print_path_separator_if_needed(&memo->segment_count); +} + +static void strcat_path_segment(const char *segment, void *more_segments) { + (void)more_segments; + if (suite_path[0] != '\0') strcat(suite_path, "-"); + strncat(suite_path, segment, sizeof(suite_path)-strlen(suite_path)-1); +} + +static void add_suite_name(const char *suite_name) { + if (suite_path[0] != '\0') + strcat(suite_path, "-"); + strncat(suite_path, suite_name, sizeof(suite_path)-strlen(suite_path)-1); +} + +#define NESTED_SUITE_MAX 100 +struct xml_suite_context { + xmlDocPtr doc; + xmlNodePtr suite, curTest; + FILE* outFile; + uint32_t suite_duration; +}; +static struct xml_suite_context context_stack[NESTED_SUITE_MAX]; +static int context_stack_p = 0; + +/* + * The table below is taken from the "Non-restricted characters" section of + * https://en.wikipedia.org/wiki/Valid_characters_in_XML + */ +static bool isNonRestrictedXMLChar(unsigned int c) { + return (c == 0x09u || c == 0x0Au || c == 0x0Du || + (c >= 0x20u && c <= 0x7Eu) || c == 0x85u || + (c >= 0xA0u && c <= 0xD7FFu) || + (c >= 0xE000u && c <= 0xFDCFu) || + (c >= 0xFDF0u && c <= 0xFFFDu) || 
+ (c >= 0x10000u && c <= 0x1FFFDu) || + (c >= 0x20000u && c <= 0x2FFFDu) || + (c >= 0x30000u && c <= 0x3FFFDu) || + (c >= 0x40000u && c <= 0x4FFFDu) || + (c >= 0x50000u && c <= 0x5FFFDu) || + (c >= 0x60000u && c <= 0x6FFFDu) || + (c >= 0x70000u && c <= 0x7FFFDu) || + (c >= 0x80000u && c <= 0x8FFFDu) || + (c >= 0x90000u && c <= 0x9FFFDu) || + (c >= 0xA0000u && c <= 0xAFFFDu) || + (c >= 0xB0000u && c <= 0xBFFFDu) || + (c >= 0xC0000u && c <= 0xCFFFDu) || + (c >= 0xD0000u && c <= 0xDFFFDu) || + (c >= 0xE0000u && c <= 0xEFFFDu) || + (c >= 0xF0000u && c <= 0xFFFFDu) || + (c >= 0x100000u && c <= 0x10FFFD)); +} + +static bool isOverlongUTF8(unsigned int ucs, int len) +{ + return ((ucs <= 0x7f && len > 1) || + (ucs <= 0x7ff && len > 2) || + (ucs <= 0xffff && len > 3) || + (ucs <= 0x10ffff && len > 4)); +} + +/* + * Return a copy of the argument prepared to be used as an attribute value. + * The produced result is not necessary convertable back to the original set + * of bytes. + */ +static xmlChar* xmlEscapePropValue(const char *str) { + size_t len = strlen(str); + /* + * The worst-case length of the output is 4 times the input length (if + * every input byte has to be escaped) plus 1 for the terminating NUL + * character + */ + size_t retLen = len * 4 + 1; + xmlChar* ret = xmlMalloc(retLen), *retPos = ret, *retEnd = ret + retLen; + if (!ret) { + fprintf(stderr, "memory allocation failure in %s\n", __func__); + exit(EXIT_FAILURE); + } + + const unsigned char *it = (const unsigned char*)str; + const unsigned char *itEnd = it + len; + while (*it) { + int utfLen = itEnd - it; + int ucs = xmlGetUTF8Char(it, &utfLen); + if (ucs != -1) { + if (!isOverlongUTF8(ucs, utfLen)) { + if (isNonRestrictedXMLChar(ucs)) { + /* Valid UTF8 sequence of an allowed character */ + while (utfLen--) + *retPos++ = *it++; + } else { + xmlStrPrintf(retPos, retEnd - retPos, "&x%x;", ucs); + } + } else { + /* Disallowed character or overlong UTF8, escape entire sequence */ + for(int i = 0;i < 
utfLen;++i) { + xmlStrPrintf(retPos, retEnd-retPos, "\\x%.2" PRIx8, *it); + retPos += 4; + ++it; + } + } + } else { + /* Invalid UTF8, escape one byte then try to parse rest of input as UTF8 again */ + xmlStrPrintf(retPos, retEnd-retPos, "\\x%.2" PRIx8, *it); + retPos += 4; + ++it; + } + } + + *retPos = '\0'; + return ret; +} + +static void xml_reporter_start_suite(TestReporter *reporter, const char *suitename, int count) { + char filename[PATH_MAX]; + int segment_decrementer = reporter->breadcrumb->depth; + XmlMemo *memo = (XmlMemo *)reporter->memo; + if (context_stack_p >= NESTED_SUITE_MAX) + abort(); + struct xml_suite_context *ctx = &context_stack[context_stack_p++]; + + FILE *out; + + (void)count; /* UNUSED */ + + reporter->passes = 0; + reporter->failures = 0; + reporter->skips = 0; + reporter->exceptions = 0; + + suite_path[0] = '\0'; + walk_breadcrumb(reporter->breadcrumb, strcat_path_segment, &segment_decrementer); + add_suite_name(suitename); + + if (snprintf(filename, sizeof(filename), "%s-%s.xml", file_prefix, suite_path) >= (int)sizeof(filename)) { + fprintf(stderr, "filename truncated; exceeds PATH_MAX (= %d)\n", PATH_MAX); + exit(EXIT_FAILURE); + } + if (memo->printer == default_printer) { + // If we're really printing to files, then open one... 
+ out = fopen(filename, "w"); + if (!out) { + fprintf(stderr, "could not open %s: %s\r\n", filename, strerror(errno)); + exit(EXIT_FAILURE); + } + ctx->outFile = out; + } + + ctx->suite_duration = 0; + ctx->doc = xmlNewDoc(XMLSTRING("1.0")); + ctx->suite = xmlNewNode(NULL, XMLSTRING("testsuite")); + xmlChar *xml_suite_path = xmlEscapePropValue(suite_path); + xmlNewProp(ctx->suite, XMLSTRING("name"), xml_suite_path); + xmlFree(xml_suite_path); + xmlDocSetRootElement(ctx->doc, ctx->suite); + + reporter_start_suite(reporter, suitename, 0); +} + + +/* Accumulate output from the actual test (the "" nodes) in + a file since the tests usually are run in a child processes, so + there is no simple way to save output from it and then use it in + the parent (start_test() and finish_test() are run from parent) */ + +static FILE *child_output_tmpfile; +static xmlTextWriterPtr child_output_writer; + +static void xml_reporter_start_test(TestReporter *reporter, const char *testname) { + XmlMemo *memo = (XmlMemo *)reporter->memo; + struct xml_suite_context *ctx = &context_stack[context_stack_p-1]; + + ctx->curTest = xmlNewChild(ctx->suite, NULL, XMLSTRING("testcase"), NULL); + xmlChar *xml_testname = xmlEscapePropValue(testname); + xmlNewProp(ctx->curTest, XMLSTRING("name"), xml_testname); + xmlFree(xml_testname); + + memo->segment_count = reporter->breadcrumb->depth - 1; + suite_path[0] = '\0'; + walk_breadcrumb(reporter->breadcrumb, print_path_segment_walker, memo); + xmlChar* xml_suite_path = xmlEscapePropValue(suite_path); + xmlNewProp(ctx->curTest, XMLSTRING("classname"), xml_suite_path); + xmlFree(xml_suite_path); + + reporter_start_test(reporter, testname); + + child_output_tmpfile = tmpfile(); + xmlOutputBufferPtr tmpfileBuf + = xmlOutputBufferCreateFile(child_output_tmpfile, + xmlGetCharEncodingHandler(XML_CHAR_ENCODING_UTF8)); + child_output_writer = xmlNewTextWriter(tmpfileBuf); +} + + +static void xml_show_skip(TestReporter *reporter, const char *file, int line) { + 
(void)file; + (void)line; + (void)reporter; + + xmlTextWriterStartElement(child_output_writer, XMLSTRING("skipped")); + xmlTextWriterEndElement(child_output_writer); // + xmlTextWriterFlush(child_output_writer); +} + +static xmlChar* xml_secure_vprint(const char *format, va_list ap) +{ + char buf[100]; + char *msg = buf; + const size_t len = sizeof(buf); + + va_list ap_store; + va_copy(ap_store, ap); + int res = vsnprintf(msg, len, format, ap_store); + va_end(ap_store); + if (res < 0) { + fprintf(stderr, "vsnprintf failed in xml_secure_vprint: %s\n", strerror(res)); + exit(EXIT_FAILURE); + } + + if ((unsigned)res >= len) { + size_t msglen = res+1; + msg = malloc(msglen); + if (!msg) { + fprintf(stderr, "memory allocation failure in xml_secure_vprint\n"); + exit(EXIT_FAILURE); + } + res = vsnprintf(msg, msglen, format, ap); + if (res < 0) { + free(msg); + fprintf(stderr, "vsnprintf failed in xml_secure_vprint: %s\n", strerror(res)); + exit(EXIT_FAILURE); + } + if ((unsigned)res >= msglen) { + // What? Did format get longer while allocating msg? 
+ free(msg); + fprintf(stderr, "vsnprintf failed in xml_secure_vprint: nondeterministic message length\n"); + exit(EXIT_FAILURE); + } + } + + xmlChar *xml_msg = xmlEscapePropValue(msg); + if (msg != buf) free(msg); + return xml_msg; +} + +static void xml_show_fail(TestReporter *reporter, const char *file, int line, + const char *message, va_list arguments) { + (void)reporter; + + xmlTextWriterStartElement(child_output_writer, XMLSTRING("failure")); + + xmlChar *xml_str = xml_secure_vprint(message, arguments); + xmlTextWriterWriteAttribute(child_output_writer, XMLSTRING("message"), xml_str); + xmlFree(xml_str); + + xmlTextWriterStartElement(child_output_writer, XMLSTRING("location")); + xmlChar *xml_file = xmlEscapePropValue(file); + xmlTextWriterWriteAttribute(child_output_writer, XMLSTRING("file"), xml_file); + xmlFree(xml_file); + xmlTextWriterWriteFormatAttribute(child_output_writer, XMLSTRING("line"), "%d", line); + xmlTextWriterEndElement(child_output_writer); // + xmlTextWriterEndElement(child_output_writer); // + xmlTextWriterFlush(child_output_writer); +} + +static xmlAttrPtr xmlFormatProp(xmlNodePtr node, const xmlChar* name, const char *format, ...) 
+{ + va_list vargs; + va_start(vargs, format); + xmlChar *xml_str = xml_secure_vprint(format, vargs); + va_end(vargs); + + xmlAttrPtr attr = xmlNewProp(node, name, xml_str); + xmlFree(xml_str); + + return attr; +} + +static void xml_show_incomplete(TestReporter *reporter, const char *filename, + int line, const char *message, va_list arguments) { + (void)reporter; + + struct xml_suite_context *ctx = &context_stack[context_stack_p-1]; + + xmlNodePtr errNode = xmlNewChild(ctx->curTest, NULL, XMLSTRING("error"), NULL); + xmlNewProp(errNode, XMLSTRING("type"), XMLSTRING("Fatal")); + if (message) { + xmlChar *xml_msg = xml_secure_vprint(message, arguments); + xmlNewProp(errNode, XMLSTRING("message"), xml_msg); + xmlFree(xml_msg); + } else { + xmlNewProp(errNode, XMLSTRING("message"), + XMLSTRING("Test terminated unexpectedly, likely from a non-standard exception or Posix signal")); + } + xmlNodePtr locNode = xmlNewChild(errNode, NULL, XMLSTRING("location"), NULL); + xmlChar *xml_filename = xmlEscapePropValue(filename); + xmlNewProp(locNode, XMLSTRING("file"), xml_filename); + xmlFree(xml_filename); + xmlFormatProp(locNode, XMLSTRING("line"), "%d", line); +} + +static void insert_child_results(struct xml_suite_context *ctx) { + char childData[4096]; + fseek(child_output_tmpfile, 0, SEEK_SET); + + size_t pos = 0, ret; + while (!feof(child_output_tmpfile)) { + ret = fread(childData+pos, 1, sizeof(childData)-pos, child_output_tmpfile); + if (ferror(child_output_tmpfile)) { + abort(); + } + pos += ret; + } + + fclose(child_output_tmpfile); + if (pos > 0) { + childData[pos] = '\0'; + + xmlNodePtr childLst; + if (xmlParseBalancedChunkMemoryRecover(ctx->doc, NULL, NULL, 0, XMLSTRING(childData), &childLst, 1) != 0) { + xmlNodePtr errNode = xmlNewChild(ctx->curTest, NULL, XMLSTRING("error"), NULL); + xmlNewProp(errNode, XMLSTRING("type"), XMLSTRING("Fatal")); + xmlNewProp(errNode, XMLSTRING("message"), + XMLSTRING("Test result XML truncated or malformed, " + "likely from abnormal 
process termination")); + } + + xmlAddChildList(ctx->curTest, childLst); + } +} + +static void xml_reporter_finish_test(TestReporter *reporter, const char *filename, + int line, const char *message) { + struct xml_suite_context *ctx = &context_stack[context_stack_p-1]; + + reporter_finish_test(reporter, filename, line, message); + xmlFreeTextWriter(child_output_writer); + + xmlFormatProp(ctx->curTest, XMLSTRING("time"), + "%.5f", (double)reporter->duration/(double)1000); + + ctx->suite_duration += reporter->duration; + insert_child_results(ctx); +} + +static void deleteEmpty(xmlNodePtr elem) +{ + while (elem != NULL) { + xmlNodePtr next = elem->next; + if (xmlIsBlankNode(elem)) { + xmlUnlinkNode(elem); + xmlFreeNode(elem); + } else if (elem->children) { + deleteEmpty(elem->children); + } + + elem = next; + } +} + +static int default_printer(xmlDocPtr doc) +{ + struct xml_suite_context *ctx = &context_stack[context_stack_p-1]; + xmlOutputBufferPtr outBuf + = xmlOutputBufferCreateFile(ctx->outFile, + xmlGetCharEncodingHandler(XML_CHAR_ENCODING_UTF8)); + xmlSaveFormatFileTo(outBuf, doc, "UTF-8", 1); + + return 0; +} + +static void xml_reporter_finish_suite(TestReporter *reporter, const char *filename, int line) { + XmlMemo *memo = (XmlMemo *)reporter->memo; + struct xml_suite_context *ctx = &context_stack[context_stack_p-1]; + + reporter_finish_suite(reporter, filename, line); + + reporter->total_passes += reporter->passes; + reporter->total_failures += reporter->failures; + reporter->total_skips += reporter->skips; + reporter->total_exceptions += reporter->exceptions; + + xmlFormatProp(ctx->suite, XMLSTRING("failures"), "%d", reporter->failures); + xmlFormatProp(ctx->suite, XMLSTRING("errors"), "%d", reporter->exceptions); + xmlFormatProp(ctx->suite, XMLSTRING("skipped"), "%d", reporter->skips); + xmlFormatProp(ctx->suite, XMLSTRING("time"), "%.5f", (double)ctx->suite_duration/(double)1000); + + deleteEmpty(ctx->suite); + memo->printer(ctx->doc); + 
xmlFreeDoc(ctx->doc); + if (context_stack_p > 1) { + context_stack[context_stack_p-2].suite_duration += ctx->suite_duration; + } + --context_stack_p; +} diff -Nru cgreen-1.3.0/src/libxml_reporter_internal.h cgreen-1.6.3/src/libxml_reporter_internal.h --- cgreen-1.3.0/src/libxml_reporter_internal.h 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/src/libxml_reporter_internal.h 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,19 @@ +#ifndef XML_REPORTER_INTERNAL_H +#define XML_REPORTER_INTERNAL_H + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef int XmlPrinter(xmlDocPtr); +extern void set_libxml_reporter_printer(TestReporter *reporter, XmlPrinter *printer); + +#ifdef __cplusplus +} +#endif + +#endif diff -Nru cgreen-1.3.0/src/message_formatting.c cgreen-1.6.3/src/message_formatting.c --- cgreen-1.3.0/src/message_formatting.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/message_formatting.c 2023-10-03 15:16:52.000000000 +0000 @@ -165,18 +165,34 @@ } -static bool is_not_equal_to_string_constraint(Constraint *constraint) { - return strstr(constraint->name, "not ") != NULL && strstr(constraint->name, "equal ") != NULL; +static bool is_equal_to_string_constraint(Constraint *constraint) { + return strstr(constraint->name, "not ") == NULL || strstr(constraint->name, "equal ") == NULL; +} + +/* Formats for printing actual and expected values */ +static const char actual_value_string_format[] = "\n\t\tactual value:\t\t\t[\"%s\"]"; +static const char expected_value_string_format[] = "[%s]"; +static const char constraint_as_string_format[] = "Expected [%s] to [%s]"; +static const char at_offset[] = "\n\t\tat offset:\t\t\t[%d]"; +static const char expected_content[] = "\n\t\t\tactual value:\t\t[0x%02x]\n\t\t\texpected value:\t\t[0x%02x]"; + + +static void format_actual_string_value(intptr_t actual_value, char *message, size_t message_size) { + snprintf(message + strlen(message), message_size - strlen(message) - 1, + 
actual_value_string_format, + (const char *)actual_value); +} + + +static void format_expected_string_value(Constraint *constraint, char *message, size_t message_size) { + snprintf(message + strlen(message), message_size - strlen(message) - 1, + constraint->expected_value_message, + constraint->expected_value.value.string_value); } char *failure_message_for(Constraint *constraint, const char *actual_string, intptr_t actual_value) { char actual_int_value_string[32]; - const char *constraint_as_string_format = "Expected [%s] to [%s]"; - const char *expected_value_string_format = "[%s]"; - const char *actual_value_string_format = "\n\t\tactual value:\t\t\t[\"%s\"]"; - const char *at_offset = "\n\t\tat offset:\t\t\t[%d]"; - const char *expected_content = "\n\t\t\tactual value:\t\t[0x%02x]\n\t\t\texpected value:\t\t[0x%02x]"; const char *actual_value_as_string; char *message; size_t message_size = strlen(constraint_as_string_format) + @@ -232,14 +248,10 @@ /* for string constraints, print out the strings encountered and not their pointer values */ if (values_are_strings_in(constraint)) { - snprintf(message + strlen(message), message_size - strlen(message) - 1, - actual_value_string_format, - (const char *)actual_value); - if (!is_not_equal_to_string_constraint(constraint)) { + format_actual_string_value(actual_value, message, message_size); + if (is_equal_to_string_constraint(constraint)) { strcat(message, "\n"); - snprintf(message + strlen(message), message_size - strlen(message) - 1, - constraint->expected_value_message, - constraint->expected_value.value.string_value); + format_expected_string_value(constraint, message, message_size); } /* The final string may have percent characters, so, since it is later used in a (v)printf, we have to double them diff -Nru cgreen-1.3.0/src/mocks.c cgreen-1.6.3/src/mocks.c --- cgreen-1.3.0/src/mocks.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/mocks.c 2023-10-03 15:16:52.000000000 +0000 @@ -14,6 +14,7 @@ #include 
"parameters.h" #include "constraint_internal.h" #include "utils.h" +#include "runner.h" #ifdef __ANDROID__ @@ -187,7 +188,6 @@ CgreenVector *parameter_names; int failures_before_read_only_constraints_executed; int failures_after_read_only_constraints_executed; - int i; CgreenValue stored_result; RecordedExpectation *expectation = find_expectation(function); @@ -222,20 +222,17 @@ // return them. There should also be a 'mock_double_()' which does the same except // returning a double. - for (i = 0; i < cgreen_vector_size(expectation->constraints); i++) { + // First check all actual constraints for validity... + for (int i = 0; i < cgreen_vector_size(expectation->constraints); i++) { Constraint *constraint = (Constraint *)cgreen_vector_get(expectation->constraints, i); - if (is_side_effect_constraint(constraint)) { - apply_side_effect(test_reporter, expectation, constraint); - continue; - } - - if(constraint->type == CALL_COUNTER) { + if (constraint->type == CGREEN_CALL_COUNTER_CONSTRAINT) { expectation->number_times_called++; continue; } - if (!is_parameter(constraint)) continue; + if (!is_parameter(constraint)) + continue; if (!constraint_is_for_parameter_in(constraint, parameters)) { // if expectation parameter name isn't in parameter_names, @@ -249,11 +246,13 @@ } } - // if read-only constraints aren't matching, content-setting ones might corrupt memory - // so apply read-only ones first, and if they don't fail, then do the deeper constraints + // Now we can do the read-only constraints. 
If read-only + // constraints aren't matching, content-setting ones might corrupt + // memory so apply read-only ones first, and if they don't fail, + // then do the deeper constraints failures_before_read_only_constraints_executed = test_reporter->failures; - for (i = 0; i < cgreen_vector_size(parameter_names); i++) { + for (int i = 0; i < cgreen_vector_size(parameter_names); i++) { const char* parameter_name = (const char*)cgreen_vector_get(parameter_names, i); CgreenValue actual = *(CgreenValue*)cgreen_vector_get(actual_values, i); apply_any_read_only_parameter_constraints(expectation, parameter_name, actual, test_reporter); @@ -261,17 +260,30 @@ failures_after_read_only_constraints_executed = test_reporter->failures; + // And now we can do the content setting constraints... // FIXME: this comparison doesn't work because only parent // processes' pass/fail counts are updated, and even then // only once they read from the pipe if (failures_before_read_only_constraints_executed == failures_after_read_only_constraints_executed) { - for (i = 0; i < cgreen_vector_size(parameter_names); i++) { + for (int i = 0; i < cgreen_vector_size(parameter_names); i++) { const char* parameter_name = (const char*)cgreen_vector_get(parameter_names, i); CgreenValue actual = *(CgreenValue*)cgreen_vector_get(actual_values, i); apply_any_content_setting_parameter_constraints(expectation, parameter_name, actual, test_reporter); } } + // And finally run all side effects + for (int i = 0; i < cgreen_vector_size(expectation->constraints); i++) { + Constraint *constraint = (Constraint *)cgreen_vector_get(expectation->constraints, i); + + if (is_side_effect_constraint(constraint)) { + apply_side_effect(test_reporter, expectation, constraint); + continue; + } + } + + + destroy_cgreen_vector(parameter_names); destroy_cgreen_vector(actual_values); @@ -279,10 +291,11 @@ destroy_expectation_if_time_to_die(expectation); if (stored_result.type == CGREEN_DOUBLE) { -#ifdef V2 - /* TODO: for v2 we 
should ensure that the user is not trying to return a double - through 'mock()' when there is a 'mock_double()' available, which there isn't yet. - So then +#ifdef FUTURE + /* TODO: for some future version we should ensure that the + user is not trying to return a double through 'mock()' when + there is a 'mock_double()' available, which there isn't + yet. So then return unbox_double(mock(...)); @@ -313,8 +326,8 @@ } static void apply_side_effect(TestReporter *test_reporter, - const RecordedExpectation *expectation, - Constraint *constraint) + const RecordedExpectation *expectation, + Constraint *constraint) { CgreenValue actual = {}; constraint->execute( @@ -325,9 +338,10 @@ expectation->test_line, test_reporter); } -static -bool -is_side_effect_constraint(const Constraint *constraint) { return constraint->type == CALL; } + +static bool is_side_effect_constraint(const Constraint *constraint) { + return constraint->type == CGREEN_CALL_CONSTRAINT; +} static CgreenVector *create_vector_of_actuals(va_list actuals, int count) { int i; @@ -395,7 +409,7 @@ Constraint * time_constraint = create_constraint(); time_constraint->expected_value = make_cgreen_integer_value(number_times_called); time_constraint->expected_value_name = string_dup("times"); - time_constraint->type = CALL_COUNTER; + time_constraint->type = CGREEN_CALL_COUNTER_CONSTRAINT; time_constraint->compare = &compare_want_value; time_constraint->execute = &test_times_called; @@ -466,7 +480,7 @@ expectation->time_to_live = 1; for (int i = 0 ; i < cgreen_vector_size(expectation->constraints) ; i++) { Constraint * constraint = cgreen_vector_get(expectation->constraints, i); - if (constraint && constraint->type == CALL_COUNTER) { + if (constraint && constraint->type == CGREEN_CALL_COUNTER_CONSTRAINT) { expectation->time_to_live = (int)constraint->expected_value.value.integer_value; break; } @@ -599,8 +613,8 @@ test_reporter->assert_true( test_reporter, - expectation->test_file, - expectation->test_line, + 
current_test->filename, + current_test->line, false, message, expectation->function); @@ -787,7 +801,7 @@ bool call_counter_present = false; for (int c = 0; c < cgreen_vector_size(expectation->constraints); c++) { Constraint *constraint = (Constraint *) cgreen_vector_get(expectation->constraints, c); - if(constraint->type == CALL_COUNTER) { + if (constraint->type == CGREEN_CALL_COUNTER_CONSTRAINT) { constraint->execute( constraint, expectation->function, @@ -843,11 +857,14 @@ for (i = 0; i < cgreen_vector_size(expectation->constraints); i++) { Constraint *constraint = (Constraint *)cgreen_vector_get(expectation->constraints, i); - if (constraint_is_not_for_parameter(constraint, parameter)) { + if (is_content_setting(constraint) || is_side_effect_constraint(constraint)) { continue; } - if (constraint->type == CONTENT_SETTER) { + /* TODO: we need to classify constraints better, now this + filters on COMPARING to not execute RETURN constraints + here...*/ + if (constraint_is_not_for_parameter(constraint, parameter)) { continue; } @@ -861,16 +878,17 @@ } } -static void apply_any_content_setting_parameter_constraints(RecordedExpectation *expectation, const char *parameter, CgreenValue actual, TestReporter* test_reporter) { - int i; - for (i = 0; i < cgreen_vector_size(expectation->constraints); i++) { +static void apply_any_content_setting_parameter_constraints(RecordedExpectation *expectation, + const char *parameter, CgreenValue actual, + TestReporter* test_reporter) { + for (int i = 0; i < cgreen_vector_size(expectation->constraints); i++) { Constraint *constraint = (Constraint *)cgreen_vector_get(expectation->constraints, i); if (constraint_is_not_for_parameter(constraint, parameter)) { continue; } - if (constraint->type != CONTENT_SETTER) { + if (constraint->type != CGREEN_CONTENT_SETTER_CONSTRAINT) { continue; } @@ -885,17 +903,28 @@ } static CgreenValue stored_result_or_default_for(CgreenVector* constraints) { - int i; - for (i = 0; i < 
cgreen_vector_size(constraints); i++) { + for (int i = 0; i < cgreen_vector_size(constraints); i++) { Constraint *constraint = (Constraint *)cgreen_vector_get(constraints, i); - if (constraint->type == RETURN_VALUE) { - return constraint->expected_value; - } else if (constraint->type == RETURN_POINTER) { + switch (constraint->type) { + case CGREEN_RETURN_VALUE_CONSTRAINT: + case CGREEN_RETURN_POINTER_CONSTRAINT: return constraint->expected_value; + case CGREEN_RETURN_BY_VALUE_CONSTRAINT: { + /* When returning a struct by value we need to copy the struct + pointed out by the Cgreen pointer value so that it does + not get lost when the constraint is destroyed. The user + will be responsible for deallocating the copy of the struct. */ + CgreenValue returnable = constraint->expected_value; + void *the_struct = malloc(returnable.value_size); + memcpy(the_struct, returnable.value.pointer_value, returnable.value_size); + returnable.value.pointer_value = the_struct; + return returnable; + } + default: + break; } } - return (CgreenValue){CGREEN_INTEGER, {0}, 0}; } @@ -904,8 +933,7 @@ } static bool have_always_expectation_for(const char* function) { - int i; - for (i = 0; i < cgreen_vector_size(global_expectation_queue); i++) { + for (int i = 0; i < cgreen_vector_size(global_expectation_queue); i++) { RecordedExpectation *expectation = (RecordedExpectation *)cgreen_vector_get(global_expectation_queue, i); if (strcmp(expectation->function, function) == 0) { @@ -923,8 +951,7 @@ } static bool have_never_call_expectation_for(const char* function) { - int i; - for (i = 0; i < cgreen_vector_size(global_expectation_queue); i++) { + for (int i = 0; i < cgreen_vector_size(global_expectation_queue); i++) { RecordedExpectation *expectation = (RecordedExpectation *)cgreen_vector_get(global_expectation_queue, i); if (strcmp(expectation->function, function) == 0) { @@ -938,8 +965,7 @@ } static bool remove_never_call_expectation_for(const char* function) { - int i; - for (i = 0; i < 
cgreen_vector_size(global_expectation_queue); i++) { + for (int i = 0; i < cgreen_vector_size(global_expectation_queue); i++) { RecordedExpectation *expectation = (RecordedExpectation *)cgreen_vector_get(global_expectation_queue, i); if (strcmp(expectation->function, function) == 0) { diff -Nru cgreen-1.3.0/src/posix_cgreen_time.c cgreen-1.6.3/src/posix_cgreen_time.c --- cgreen-1.3.0/src/posix_cgreen_time.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/posix_cgreen_time.c 2023-10-03 15:16:52.000000000 +0000 @@ -8,7 +8,7 @@ #endif // #ifdef __ANDROID__ /* TODO: this should really be handle by CMake config... */ -#if defined(__FreeBSD__) || defined(__linux__) || defined(__APPLE__) || defined(__CYGWIN__) +#if defined(__FreeBSD__) || defined(__linux__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__OpenBSD__) # include # define HAVE_GETTIMEOFDAY 1 #else @@ -16,7 +16,7 @@ #endif #if defined(HAVE_GETTIMEOFDAY) -uint32_t cgreen_time_get_current_milliseconds() { +uint32_t cgreen_time_get_current_milliseconds(void) { #ifdef __CYGWIN__ /* TODO: This is actually the POSIX recommended way to do this */ struct timespec ts; @@ -37,5 +37,4 @@ } #endif - /* vim: set ts=4 sw=4 et cindent: */ diff -Nru cgreen-1.3.0/src/reporter.c cgreen-1.6.3/src/reporter.c --- cgreen-1.3.0/src/reporter.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/reporter.c 2023-10-03 15:16:52.000000000 +0000 @@ -29,7 +29,7 @@ int result, const char *message, ...); static int read_reporter_results(TestReporter *reporter); -TestReporter *get_test_reporter() { +TestReporter *get_test_reporter(void) { return context.reporter; } @@ -44,7 +44,7 @@ reporter->options = options; } -TestReporter *create_reporter() { +TestReporter *create_reporter(void) { CgreenBreadcrumb *breadcrumb; TestReporter *reporter = (TestReporter *) malloc(sizeof(TestReporter)); if (reporter == NULL) { diff -Nru cgreen-1.3.0/src/runner.c cgreen-1.6.3/src/runner.c --- cgreen-1.3.0/src/runner.c 2020-06-04 
18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/runner.c 2023-10-03 15:16:52.000000000 +0000 @@ -17,8 +17,10 @@ #include "cgreen/internal/android_headers/androidcompat.h" #endif // #ifdef __ANDROID__ +// Export the current running test +CgreenTest *current_test; -static const char* CGREEN_PER_TEST_TIMEOUT_ENVIRONMENT_VARIABLE = "CGREEN_PER_TEST_TIMEOUT"; +static const char *CGREEN_PER_TEST_TIMEOUT_ENVIRONMENT_VARIABLE = "CGREEN_PER_TEST_TIMEOUT"; static void run_every_test(TestSuite *suite, TestReporter *reporter); static void run_named_test(TestSuite *suite, const char *name, TestReporter *reporter); @@ -29,21 +31,25 @@ static int per_test_timeout_value(void); static void validate_per_test_timeout_value(void); -int run_test_suite(TestSuite *suite, TestReporter *reporter) { +int run_test_suite(TestSuite *suite, TestReporter *reporter) +{ int success; - if (per_test_timeout_defined()) { + if (per_test_timeout_defined()) + { validate_per_test_timeout_value(); } setup_reporting(reporter); run_every_test(suite, reporter); - success = (reporter->total_failures == 0) && (reporter->total_exceptions==0); + success = (reporter->total_failures == 0) && (reporter->total_exceptions == 0); return success ? EXIT_SUCCESS : EXIT_FAILURE; } -int run_single_test(TestSuite *suite, const char *name, TestReporter *reporter) { +int run_single_test(TestSuite *suite, const char *name, TestReporter *reporter) +{ int success; - if (per_test_timeout_defined()) { + if (per_test_timeout_defined()) + { validate_per_test_timeout_value(); } @@ -53,7 +59,8 @@ return success ? 
EXIT_SUCCESS : EXIT_FAILURE; } -static void run_every_test(TestSuite *suite, TestReporter *reporter) { +static void run_every_test(TestSuite *suite, TestReporter *reporter) +{ int i; run_specified_test_if_child(suite, reporter); @@ -62,8 +69,10 @@ (*reporter->start_suite)(reporter, suite->name, count_tests(suite)); // Run sub-suites first - for (i = 0; i < suite->size; i++) { - if (suite->tests[i].type != test_function) { + for (i = 0; i < suite->size; i++) + { + if (suite->tests[i].type != test_function) + { (*suite->setup)(); run_every_test(suite->tests[i].Runnable.suite, reporter); (*suite->teardown)(); @@ -79,8 +88,10 @@ uint32_t test_starting_milliseconds = cgreen_time_get_current_milliseconds(); // Run top-level tests - for (i = 0; i < suite->size; i++) { - if (suite->tests[i].type == test_function) { + for (i = 0; i < suite->size; i++) + { + if (suite->tests[i].type == test_function) + { if (getenv("CGREEN_NO_FORK") == NULL) run_test_in_its_own_process(suite, suite->tests[i].Runnable.test, reporter); else @@ -89,22 +100,25 @@ } reporter->duration = cgreen_time_duration_in_milliseconds(test_starting_milliseconds, - cgreen_time_get_current_milliseconds()); + cgreen_time_get_current_milliseconds()); reporter->total_duration = cgreen_time_duration_in_milliseconds(total_test_starting_milliseconds, - cgreen_time_get_current_milliseconds()); + cgreen_time_get_current_milliseconds()); send_reporter_completion_notification(reporter); (*reporter->finish_suite)(reporter, suite->filename, suite->line); } -static void run_named_test(TestSuite *suite, const char *name, TestReporter *reporter) { +static void run_named_test(TestSuite *suite, const char *name, TestReporter *reporter) +{ int i; uint32_t total_test_starting_milliseconds = cgreen_time_get_current_milliseconds(); (*reporter->start_suite)(reporter, suite->name, count_tests(suite)); - for (i = 0; i < suite->size; i++) { + for (i = 0; i < suite->size; i++) + { if (suite->tests[i].type != test_function && - 
has_test(suite->tests[i].Runnable.suite, name)) { + has_test(suite->tests[i].Runnable.suite, name)) + { (*suite->setup)(); run_named_test(suite->tests[i].Runnable.suite, name, reporter); (*suite->teardown)(); @@ -119,49 +133,58 @@ uint32_t test_starting_milliseconds = cgreen_time_get_current_milliseconds(); - for (i = 0; i < suite->size; i++) { - if (suite->tests[i].type == test_function) { - if (strcmp(suite->tests[i].name, name) == 0) { + for (i = 0; i < suite->size; i++) + { + if (suite->tests[i].type == test_function) + { + if (strcmp(suite->tests[i].name, name) == 0) + { run_test_in_the_current_process(suite, suite->tests[i].Runnable.test, reporter); } } } reporter->duration = cgreen_time_duration_in_milliseconds(test_starting_milliseconds, - cgreen_time_get_current_milliseconds()); + cgreen_time_get_current_milliseconds()); reporter->total_duration = cgreen_time_duration_in_milliseconds(total_test_starting_milliseconds, - cgreen_time_get_current_milliseconds()); + cgreen_time_get_current_milliseconds()); send_reporter_completion_notification(reporter); (*reporter->finish_suite)(reporter, suite->filename, suite->line); } - -static void run_test_in_the_current_process(TestSuite *suite, CgreenTest *test, TestReporter *reporter) { +static void run_test_in_the_current_process(TestSuite *suite, CgreenTest *test, TestReporter *reporter) +{ uint32_t test_starting_milliseconds = cgreen_time_get_current_milliseconds(); (*reporter->start_test)(reporter, test->name); - if (test->skip) { + if (test->skip) + { send_reporter_skipped_notification(reporter); - } else { + } + else + { run_the_test_code(suite, test, reporter); reporter->duration = cgreen_time_duration_in_milliseconds(test_starting_milliseconds, - cgreen_time_get_current_milliseconds()); + cgreen_time_get_current_milliseconds()); send_reporter_completion_notification(reporter); } (*reporter->finish_test)(reporter, test->filename, test->line, NULL); } -static int per_test_timeout_defined() { +static int 
per_test_timeout_defined(void) +{ return getenv(CGREEN_PER_TEST_TIMEOUT_ENVIRONMENT_VARIABLE) != NULL; } -static int per_test_timeout_value() { - char* timeout_string; +static int per_test_timeout_value(void) +{ + char *timeout_string; int timeout_value; - if (!per_test_timeout_defined()) { + if (!per_test_timeout_defined()) + { die("attempt to fetch undefined value for %s\n", CGREEN_PER_TEST_TIMEOUT_ENVIRONMENT_VARIABLE); } @@ -171,43 +194,59 @@ return timeout_value; } -static void validate_per_test_timeout_value() { +static void validate_per_test_timeout_value(void) +{ int timeout = per_test_timeout_value(); - if (timeout <= 0) { + if (timeout <= 0) + { die("invalid value for %s environment variable: %d\n", CGREEN_PER_TEST_TIMEOUT_ENVIRONMENT_VARIABLE, timeout); } } -static void run_setup_for(CgreenTest *spec) { +static void run_setup_for(CgreenTest *spec) +{ #ifdef __cplusplus std::string message = "an exception was thrown during setup: "; - try { + try + { #endif spec->context->setup(); #ifdef __cplusplus return; - } catch(const std::exception& exception) { + } + catch (const std::exception &exception) + { message += '['; message += exception.what(); message += ']'; - } catch(const std::exception* exception) { + } + catch (const std::exception *exception) + { message += '['; message += exception->what(); message += ']'; - } catch(const std::string& exception_message) { + } + catch (const std::string &exception_message) + { message += '['; message += exception_message; message += ']'; - } catch(const std::string *exception_message) { + } + catch (const std::string *exception_message) + { message += '['; message += *exception_message; message += ']'; - } catch(const char *exception_message) { + } + catch (const char *exception_message) + { message += '['; message += exception_message; message += ']'; - } catch (...) { + } + catch (...) 
+ { message += "unknown exception type"; } va_list no_arguments; @@ -218,35 +257,49 @@ #endif } -static void run_teardown_for(CgreenTest *spec) { +static void run_teardown_for(CgreenTest *spec) +{ #ifdef __cplusplus std::string message = "an exception was thrown during teardown: "; - try { + try + { #endif spec->context->teardown(); #ifdef __cplusplus return; - } catch(const std::exception& exception) { + } + catch (const std::exception &exception) + { message += '['; message += exception.what(); message += ']'; - } catch(const std::exception* exception) { + } + catch (const std::exception *exception) + { message += '['; message += exception->what(); message += ']'; - } catch(const std::string& exception_message) { + } + catch (const std::string &exception_message) + { message += '['; message += exception_message; message += ']'; - } catch(const std::string *exception_message) { + } + catch (const std::string *exception_message) + { message += '['; message += *exception_message; message += ']'; - } catch(const char *exception_message) { + } + catch (const char *exception_message) + { message += '['; message += exception_message; message += ']'; - } catch (...) { + } + catch (...) + { message += "unknown exception type"; } va_list no_arguments; @@ -264,35 +317,50 @@ documented as a good place to put a breakpoint. Do not change the name or semantics of this function, it should continue to be very close to the test code. 
*/ -static void run(CgreenTest *spec) { +static void run(CgreenTest *spec) +{ #ifdef __cplusplus std::string message = "an exception was thrown during test: "; - try { + try + { #endif + current_test = spec; spec->run(); #ifdef __cplusplus return; - } catch(const std::exception& exception) { + } + catch (const std::exception &exception) + { message += '['; message += exception.what(); message += ']'; - } catch(const std::exception* exception) { + } + catch (const std::exception *exception) + { message += '['; message += exception->what(); message += ']'; - } catch(const std::string& exception_message) { + } + catch (const std::string &exception_message) + { message += '['; message += exception_message; message += ']'; - } catch(const std::string *exception_message) { + } + catch (const std::string *exception_message) + { message += '['; message += *exception_message; message += ']'; - } catch(const char *exception_message) { + } + catch (const char *exception_message) + { message += '['; message += exception_message; message += ']'; - } catch (...) { + } + catch (...) 
+ { message += "unknown exception type"; } va_list no_arguments; @@ -303,31 +371,41 @@ #endif } -void run_the_test_code(TestSuite *suite, CgreenTest *spec, TestReporter *reporter) { +void run_the_test_code(TestSuite *suite, CgreenTest *spec, TestReporter *reporter) +{ significant_figures_for_assert_double_are(8); clear_mocks(); - if (per_test_timeout_defined()) { + if (per_test_timeout_defined()) + { validate_per_test_timeout_value(); die_in(per_test_timeout_value()); } // for historical reasons the suite can have a setup - if(has_setup(suite)) { + if (has_setup(suite)) + { (*suite->setup)(); - } else { - if (spec->context->setup != NULL) { + } + else + { + if (spec->context->setup != NULL) + { run_setup_for(spec); } } run(spec); // for historical reasons the suite can have a teardown - if (has_teardown(suite)) { + if (has_teardown(suite)) + { (*suite->teardown)(); - } else { - if (spec->context->teardown != NULL) { + } + else + { + if (spec->context->teardown != NULL) + { run_teardown_for(spec); } } @@ -335,7 +413,8 @@ tally_mocks(reporter); } -void die(const char *message, ...) { +void die(const char *message, ...) 
+{ va_list arguments; va_start(arguments, message); vprintf(message, arguments); diff -Nru cgreen-1.3.0/src/runner.h cgreen-1.6.3/src/runner.h --- cgreen-1.3.0/src/runner.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/runner.h 2023-10-03 15:16:52.000000000 +0000 @@ -1 +1,3 @@ #include + +extern CgreenTest *current_test; diff -Nru cgreen-1.3.0/src/suite.c cgreen-1.6.3/src/suite.c --- cgreen-1.3.0/src/suite.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/suite.c 2023-10-03 15:16:52.000000000 +0000 @@ -1,3 +1,4 @@ +#include #include #include #include @@ -13,7 +14,7 @@ CgreenContext defaultContext = { /* name */ "", - /* filename */ __FILE__, + /* filename */ FILENAME, /* setup */ &do_nothing, /* teardown */ &do_nothing }; diff -Nru cgreen-1.3.0/src/text_reporter.c cgreen-1.6.3/src/text_reporter.c --- cgreen-1.3.0/src/text_reporter.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/text_reporter.c 2023-10-03 15:16:52.000000000 +0000 @@ -279,6 +279,8 @@ const char *message, va_list arguments) { TextMemo *memo = (TextMemo *)reporter->memo; + if (have_quiet_mode(reporter)) + memo->printer("\n"); /* To break line of "....." for error message parsing */ memo->printer("%s:%d: ", file, line); memo->printer("Exception: "); diff -Nru cgreen-1.3.0/src/utils.h cgreen-1.6.3/src/utils.h --- cgreen-1.3.0/src/utils.h 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/utils.h 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,7 @@ #ifndef UTILS_HEADER #define UTILS_HEADER +#include #include #ifdef __cplusplus @@ -10,7 +11,7 @@ extern bool panic_use_colours; -#define PANIC(...) panic(__FILE__, __LINE__, __VA_ARGS__) +#define PANIC(...) 
panic(FILENAME, __LINE__, __VA_ARGS__) extern char *string_dup(const char *original); extern void panic_set_output_buffer(const char *buffer); extern void panic(const char *filename, int line, const char *fmt, ...); diff -Nru cgreen-1.3.0/src/xml_reporter.c cgreen-1.6.3/src/xml_reporter.c --- cgreen-1.3.0/src/xml_reporter.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/src/xml_reporter.c 2023-10-03 15:16:52.000000000 +0000 @@ -5,6 +5,7 @@ #include #include +#include "utils.h" #include "xml_reporter_internal.h" @@ -128,7 +129,9 @@ walk_breadcrumb(reporter->breadcrumb, strcat_path_segment, &segment_decrementer); add_suite_name(suitename); - snprintf(filename, sizeof(filename), "%s-%s.xml", file_prefix, suite_path); + if (snprintf(filename, sizeof(filename), "%s-%s.xml", file_prefix, suite_path) < 0) + PANIC("Error when creating output filename"); + if (memo->printer == fprintf) { // If we're really printing to files, then open one... out = fopen(filename, "w"); @@ -190,14 +193,44 @@ fputs(output, child_output_tmpfile); } +static void xml_concat_escaped_message(const char *message, va_list arguments) { + char buffer[1000]; + snprintf(buffer, sizeof(buffer)/sizeof(buffer[0]), message, arguments); + + size_t current_char_position = 0; + for (; current_char_position < strlen(buffer); current_char_position++) { + switch (buffer[current_char_position]) { + case '"': + output = concat(output, """); + break; + case '&': + output = concat(output, "&"); + break; + case '<': + output = concat(output, "<"); + break; + case '>': + output = concat(output, ">"); + break; + case '\'': + output = concat(output, "'"); + break; + default: { + char single_char[2] = {0}; + single_char[0] = buffer[current_char_position]; + output = concat(output, single_char); + } + } + } +} + static void xml_show_fail(TestReporter *reporter, const char *file, int line, const char *message, va_list arguments) { char buffer[1000]; output = concat(output, indent(reporter)); output = concat(output, 
"\n"); output = concat(output, indent(reporter)); @@ -217,7 +250,7 @@ output = concat(output, indent(reporter)); output = concat(output, "\n"); output = concat(output, indent(reporter)); diff -Nru cgreen-1.3.0/tests/all_c_tests.c cgreen-1.6.3/tests/all_c_tests.c --- cgreen-1.3.0/tests/all_c_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/all_c_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -27,6 +27,7 @@ TestSuite *unit_tests(void); TestSuite *vector_tests(void); TestSuite *xml_reporter_tests(void); +TestSuite *libxml_reporter_tests(void); int main(int argc, char **argv) { int suite_result; @@ -51,7 +52,12 @@ add_suite(suite, text_reporter_tests()); add_suite(suite, unit_tests()); add_suite(suite, vector_tests()); +#if HAVE_XML_REPORTER add_suite(suite, xml_reporter_tests()); +#endif +#if HAVE_LIBXML2_REPORTER + add_suite(suite, libxml_reporter_tests()); +#endif if (argc > 1) { suite_result = run_single_test(suite, argv[1], reporter); diff -Nru cgreen-1.3.0/tests/assertion_messages_tests.expected cgreen-1.6.3/tests/assertion_messages_tests.expected --- cgreen-1.3.0/tests/assertion_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/assertion_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,50 +1,50 @@ Running "assertion_messages_tests" (7 tests)... -assertion_messages_tests.c: Failure: AssertionMessage -> for_actual_with_percent +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_actual_with_percent Expected [strlen("%d")] to [equal] [3] actual value: [2] expected value: [3] -assertion_messages_tests.c: Failure: AssertionMessage -> for_compare_area_to_null +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_compare_area_to_null Wanted to compare contents of [null_pointer] but it had a value of NULL. If you want to explicitly check for null, use the is_null constraint instead. 
-assertion_messages_tests.c: Failure: AssertionMessage -> for_compare_null_to_area +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_compare_null_to_area Wanted to compare contents with [area], but NULL was used for the pointer we wanted to compare to. If you want to explicitly check for null, use the is_null constraint instead. -assertion_messages_tests.c: Failure: AssertionMessage -> for_comparing_content_with_negative_length +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_comparing_content_with_negative_length Wanted to compare contents with [something], but [-4] was given for the comparison size. -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that Constraints of double type, such as [equal double], should only be used with 'assert_that_double()' to ensure proper comparison. -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that Expected [3] to [equal double] [3.1415926] -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that Constraints of double type, such as [be less than double], should only be used with 'assert_that_double()' to ensure proper comparison. -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that Constraints of double type, such as [be greater than double], should only be used with 'assert_that_double()' to ensure proper comparison. 
-assertion_messages_tests.c: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_double_constraints_with_assert_that Expected [7] to [be greater than double] [3.1415926] -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_non_double_constraints_with_assert_that_double +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_non_double_constraints_with_assert_that_double Only constraints of double type should be used with 'assert_that_double()'. Other types of constraints, such as [equal], will probably fail comparison. -assertion_messages_tests.c: Failure: AssertionMessage -> for_using_non_double_constraints_with_assert_that_double +assertion_messages_tests.c:000: Failure: AssertionMessage -> for_using_non_double_constraints_with_assert_that_double Expected [3] to [equal] [3] within [8] significant figures actual value: [3.000000] expected value: [0.000000] -assertion_messages_tests.c: Failure: AssertionMessage -> return_value_constraints_are_not_allowed +assertion_messages_tests.c:000: Failure: AssertionMessage -> return_value_constraints_are_not_allowed Got constraint of type [return value], but they are not allowed for assertions, only in mock expectations. 
diff -Nru cgreen-1.3.0/tests/assertion_tests.c cgreen-1.6.3/tests/assertion_tests.c --- cgreen-1.3.0/tests/assertion_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/assertion_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -15,6 +15,10 @@ assert_that(1, is_true); } +Ensure(integer_one_should_assert_true_short) { + assert_that(1); +} + Ensure(integer_two_should_assert_true) { assert_that(2, is_true); } diff -Nru cgreen-1.3.0/tests/cdash_reporter_tests.c cgreen-1.6.3/tests/cdash_reporter_tests.c --- cgreen-1.3.0/tests/cdash_reporter_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/cdash_reporter_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -35,7 +35,7 @@ (void)stream; /* Unused */ char buffer[10000]; - vsprintf(buffer, format, ap); + vsnprintf(buffer, sizeof(buffer), format, ap); output = concat(output, buffer); return strlen(output); diff -Nru cgreen-1.3.0/tests/CMakeLists.txt cgreen-1.6.3/tests/CMakeLists.txt --- cgreen-1.3.0/tests/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -45,14 +45,20 @@ message_formatting_tests.c messaging_tests.c mocks_tests.c + mocks_struct_tests.c parameters_tests.c reflective_runner_no_teardown_tests.c reflective_tests.c text_reporter_tests.c unit_tests.c vector_tests.c - xml_reporter_tests.c ) +if (CGREEN_WITH_XML) + LIST(APPEND c_tests_library_SRCS xml_reporter_tests.c) +endif (CGREEN_WITH_XML) +if (CGREEN_WITH_LIBXML2) + LIST(APPEND c_tests_library_SRCS libxml_reporter_tests.c) +endif (CGREEN_WITH_LIBXML2) SET_SOURCE_FILES_PROPERTIES(${c_tests_library_SRCS} PROPERTIES LANGUAGE C) set(CGREEN_C_TESTS_LIBRARY @@ -61,7 +67,11 @@ ) add_library(${CGREEN_C_TESTS_LIBRARY} SHARED ${c_tests_library_SRCS}) -target_link_libraries(${CGREEN_C_TESTS_LIBRARY} ${CGREEN_LIBRARY}) +target_link_libraries(${CGREEN_C_TESTS_LIBRARY} ${CGREEN_LIBRARY} + $<$:${LIBXML2_LIBRARIES}>) +include_directories($<$:${LIBXML2_INCLUDE_DIRS}>) 
+macro_add_valgrind_test(${CGREEN_C_TESTS_LIBRARY}) + set(c_tests_SRCS all_c_tests.c @@ -69,14 +79,20 @@ ) SET_SOURCE_FILES_PROPERTIES(${c_tests_SRCS} PROPERTIES LANGUAGE C) -set(TEST_TARGET_LIBRARIES ${CGREEN_LIBRARY}) +set(TEST_TARGET_LIBRARIES ${CGREEN_LIBRARY} + $<$:${LIBXML2_LIBRARIES}>) # unit test with main program runner macro_add_unit_test(test_cgreen_c "${c_tests_SRCS}" "${TEST_TARGET_LIBRARIES}") macro_add_test(NAME test_cgreen_c_run_named_test COMMAND test_cgreen_c integer_one_should_assert_true) +set(cgreen_runner_args) +if (CGREEN_WITH_XML) + set(cgreen_runner_args -x TEST) +endif (CGREEN_WITH_XML) + # run them with cgreen-runner also -macro_add_test(NAME runner_test_cgreen_c COMMAND cgreen-runner -x TEST ./${CMAKE_SHARED_LIBRARY_PREFIX}${CGREEN_C_TESTS_LIBRARY}${CMAKE_SHARED_LIBRARY_SUFFIX}) +macro_add_test(NAME runner_test_cgreen_c COMMAND cgreen-runner ${cgreen_runner_args} ./${CMAKE_SHARED_LIBRARY_PREFIX}${CGREEN_C_TESTS_LIBRARY}${CMAKE_SHARED_LIBRARY_SUFFIX}) # C++ tests, library to use with runner, and a main program @@ -100,6 +116,8 @@ ) add_library(${CGREEN_CPP_TESTS_LIBRARY} SHARED ${cpp_tests_library_SRCS}) target_link_libraries(${CGREEN_CPP_TESTS_LIBRARY} ${CGREEN_LIBRARY} ${CMAKE_CXX_IMPLICIT_LINK_LIBRARIES}) +macro_add_valgrind_test(${CGREEN_CPP_TESTS_LIBRARY}) + set(cpp_tests_SRCS all_cpp_tests.cpp @@ -109,7 +127,7 @@ macro_add_unit_test(test_cgreen_cpp "${cpp_tests_SRCS}" "${TEST_TARGET_LIBRARIES}" ${CMAKE_CXX_IMPLICIT_LINK_LIBRARIES}) macro_add_test(NAME test_cgreen_cpp_run_named_test COMMAND test_cgreen_cpp different_pointers_with_same_contents_should_assert_equal) -macro_add_test(NAME runner_test_cgreen_cpp COMMAND cgreen-runner -x TEST ./${CMAKE_SHARED_LIBRARY_PREFIX}${CGREEN_CPP_TESTS_LIBRARY}${CMAKE_SHARED_LIBRARY_SUFFIX}) +macro_add_test(NAME runner_test_cgreen_cpp COMMAND cgreen-runner ${cgreen_runner_args} ./${CMAKE_SHARED_LIBRARY_PREFIX}${CGREEN_CPP_TESTS_LIBRARY}${CMAKE_SHARED_LIBRARY_SUFFIX}) # Temporary single CUT test 
library # Maybe we should do this for all tests instead @@ -127,13 +145,13 @@ message_formatting messaging mocks + mocks_struct parameters reflective reflective_runner_no_teardown text_reporter unit vector - xml_reporter ) set(${case}_tests_SRCS ${case}_tests.c @@ -144,6 +162,14 @@ add_library(${${case}_tests_library} SHARED ${${case}_tests_SRCS}) target_link_libraries(${${case}_tests_library} ${CGREEN_LIBRARY}) endforeach(case) +if (CGREEN_WITH_XML) + add_library(xml_reporter_tests SHARED xml_reporter_tests.c) + target_link_libraries(xml_reporter_tests ${CGREEN_LIBRARY}) +endif (CGREEN_WITH_XML) +if (CGREEN_WITH_LIBXML2) + add_library(libxml_reporter_tests SHARED libxml_reporter_tests.c) + target_link_libraries(libxml_reporter_tests ${CGREEN_LIBRARY}) +endif (CGREEN_WITH_LIBXML2) # Libraries for a set of output comparing tests to run with runner @@ -178,13 +204,21 @@ add_library(${ignore_messages_library} SHARED ${ignore_messages_library_SRCS}) target_link_libraries(${ignore_messages_library} ${CGREEN_LIBRARY}) -set(xml_output_library xml_output_tests) -set(xml_output_library_SRCS xml_output_tests.c) -add_library(${xml_output_library} SHARED ${xml_output_library_SRCS}) -target_link_libraries(${xml_output_library} ${CGREEN_LIBRARY}) - -set(TEST_TARGET_LIBRARIES ${CGREEN_LIBRARY}) +if (CGREEN_WITH_XML) + set(xml_output_library xml_output_tests) + set(xml_output_library_SRCS xml_output_tests.c) + add_library(${xml_output_library} SHARED ${xml_output_library_SRCS}) + target_link_libraries(${xml_output_library} ${CGREEN_LIBRARY}) +endif (CGREEN_WITH_XML) + +if (CGREEN_WITH_LIBXML2) + set(libxml_output_library libxml_output_tests) + set(libxml_output_library_SRCS libxml_output_tests.c) + add_library(${libxml_output_library} SHARED ${libxml_output_library_SRCS}) + target_link_libraries(${libxml_output_library} ${CGREEN_LIBRARY}) +endif (CGREEN_WITH_LIBXML2) +# TODO We should not add these if the cgreen-runner was not built e.g. 
no 'nm' available macro_add_test( NAME constraint_messsages COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../tools/cgreen_runner_output_diff @@ -230,12 +264,23 @@ ${ignore_messages_library}.expected ) -macro_add_test(NAME xml_output - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../tools/cgreen_xml_output_diff - xml_output_tests # Name - ${CMAKE_CURRENT_SOURCE_DIR} # Where sources are - ${xml_output_library}.expected -) +if (CGREEN_WITH_XML) + macro_add_test(NAME xml_output + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../tools/cgreen_xml_output_diff + xml_output_tests # Name + ${CMAKE_CURRENT_SOURCE_DIR} # Where sources are + ${xml_output_library}.expected + ) +endif (CGREEN_WITH_XML) + +if (CGREEN_WITH_LIBXML2) + macro_add_test(NAME libxml_output + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../tools/cgreen_libxml_output_diff + libxml_output_tests + ${CMAKE_CURRENT_SOURCE_DIR} + ${libxml_output_library}.expected + ) +endif (CGREEN_WITH_LIBXML2) # add verification that all public api is available as it should add_subdirectory(api) diff -Nru cgreen-1.3.0/tests/constraint_messages_tests.c cgreen-1.6.3/tests/constraint_messages_tests.c --- cgreen-1.3.0/tests/constraint_messages_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/constraint_messages_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -39,7 +39,7 @@ Ensure(ConstraintMessage, for_is_equal_to_hex) { unsigned char bytes[4]; memset(bytes, 0xaa, sizeof(bytes)); - assert_that(bytes[0], is_equal_to_hex(0xbb)); + assert_that((unsigned char) bytes[0], is_equal_to_hex(0xbb)); } Ensure(ConstraintMessage, for_is_not_equal_to) { @@ -60,8 +60,8 @@ // Contents of struct/memory Ensure(ConstraintMessage, for_is_equal_to_contents_of) { - int forty_five[45] = {45, 44, 43}, thirty_three[33] = {45, 44, 33}; - assert_that(thirty_three, is_equal_to_contents_of(forty_five, 55)); + char forty_five[45] = {45, 44, 43}, thirty_three[33] = {45, 44, 33}; + assert_that(thirty_three, is_equal_to_contents_of(forty_five, 45)); } Ensure(ConstraintMessage, 
for_is_not_equal_to_contents_of) { diff -Nru cgreen-1.3.0/tests/constraint_messages_tests.expected cgreen-1.6.3/tests/constraint_messages_tests.expected --- cgreen-1.3.0/tests/constraint_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/constraint_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,162 +1,162 @@ Running "constraint_messages_tests" (37 tests)... -constraint_messages_tests.c: Failure: ConstraintMessage -> for_always_followed_by_expectation +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_always_followed_by_expectation Mocked function [some_mock] already has an expectation that it will always be called a certain way; any expectations declared after an always expectation are invalid -constraint_messages_tests.c: Failure: ConstraintMessage -> for_assert_that +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_assert_that Expected [0 == 1] to [be true] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_begins_with_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_begins_with_string Expected [does_not_begin_with_forty_five] to [begin with string] [forty_five] actual value: ["this string does not begin with fortyfive"] expected to begin with: ["fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_contains_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_contains_string Expected [not_containing_forty_five] to [contain string] [forty_five] actual value: ["this text is thirtythree"] expected to contain: ["fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_does_not_begin_with_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_does_not_begin_with_string Expected [begins_with_forty_five] to [not begin with string] [forty_five] actual value: ["fortyfive is the start of this"] expected to not begin with: ["fortyfive"] -constraint_messages_tests.c: 
Failure: ConstraintMessage -> for_does_not_contain_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_does_not_contain_string Expected [contains_forty_five] to [not contain string] [forty_five] actual value: ["this string is fortyfive"] expected to not contain: ["fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_does_not_end_with_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_does_not_end_with_string Expected [ends_with_forty_five] to [not end with string] [forty_five] actual value: ["this string ends with fortyfive"] expected to not end with: ["fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_ends_with_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_ends_with_string Expected [does_not_end_with_forty_five] to [end with string] [forty_five] actual value: ["fortyfive is not the end of this string"] expected to end with: ["fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_equal_to_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_equal_to_double Expected [0] to [equal double] [1] within [8] significant figures actual value: [0.000000] expected value: [1.000000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_equal_to_double_negative +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_equal_to_double_negative Expected [-1] to [equal double] [-2] within [8] significant figures actual value: [-1.000000] expected value: [-2.000000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_equal_to Expected [forty_five] to [equal] [thirty_three] actual value: [45] expected value: [33] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_contents_of +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_equal_to_contents_of Expected 
[thirty_three] to [equal contents of] [forty_five] - at offset: [8] + at offset: [2] actual value: [0x21] expected value: [0x2b] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_equal_to_double Expected [four_point_five] to [equal double] [three_point_three] within [8] significant figures actual value: [4.500000] expected value: [3.300000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_hex - Expected [bytes[0]] to [equal] [0xbb] +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_equal_to_hex + Expected [(unsigned char) bytes[0]] to [equal] [0xbb] actual value: [0xaa] expected value: [0xbb] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_equal_to_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_equal_to_string Expected [thirty_three] to [equal string] [forty_five] actual value: ["this string is thirtythree"] expected to equal: ["this string is fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_greater_than +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_greater_than Expected [thirty_three] to [be greater than] [forty_five] actual value: [33] expected to be greater than: [45] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_greater_than_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_greater_than_double Expected [three_point_three] to [be greater than double] [four_point_five] within [8] significant figures actual value: [3.300000] expected value: [4.500000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_greater_than_double_with_accuracy +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_greater_than_double_with_accuracy Expected [1.0] to [be greater than double] [1.0 + 1.0e-3 + DBL_EPSILON] within [4] significant figures actual 
value: [1.000000] expected value: [1.001000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_less_than +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_less_than Expected [forty_five] to [be less than] [thirty_three] actual value: [45] expected to be less than: [33] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_less_than_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_less_than_double Expected [four_point_five] to [be less than double] [three_point_three] within [8] significant figures actual value: [4.500000] expected value: [3.300000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_less_than_double_with_accuracy +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_less_than_double_with_accuracy Expected [1.0] to [be less than double] [1.0 - 1.0e-3 - DBL_EPSILON] within [4] significant figures actual value: [1.000000] expected value: [0.999000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_non_null +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_non_null Expected [pointer] to [be non null] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_not_equal_to +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_not_equal_to Expected [should_not_be_forty_five] to [not equal] [forty_five] actual value: [45] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_not_equal_to_contents_of +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_not_equal_to_contents_of Expected [forty_five_and_up] to [not equal contents of] [another_forty_five_and_up] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_not_equal_to_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_not_equal_to_double Expected [four_point_five] to [not equal double] [almost_four_point_five] within [4] significant figures actual value: 
[4.500000] expected value: [4.499900] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_not_equal_to_string +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_not_equal_to_string Expected [another_forty_five] to [not equal string] [forty_five] actual value: ["this string is fortyfive"] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_is_null +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_is_null Expected [pointer] to [be null] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_mock_called_more_times_than_expected +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_mock_called_more_times_than_expected Mocked function [some_mock] was called too many times -constraint_messages_tests.c: Failure: ConstraintMessage -> for_mock_called_with_unexpected_parameter_value +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_mock_called_with_unexpected_parameter_value Expected [[parameter] parameter in [some_mock]] to [equal] [1] actual value: [0] expected value: [1] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_mock_called_without_expectation +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_mock_called_without_expectation Mocked function [some_mock] did not have an expectation that it would be called -constraint_messages_tests.c: Failure: ConstraintMessage -> for_mock_parameter_name_not_matching_constraint_parameter_name +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_mock_parameter_name_not_matching_constraint_parameter_name Mocked function [some_mock] did not define a parameter named [PARAMETER]. Did you misspell it in the expectation or forget it in the mock's argument list? 
-constraint_messages_tests.c: Failure: ConstraintMessage -> for_no_mock_parameters_with_parameter_constraint +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_no_mock_parameters_with_parameter_constraint Mocked function [forgot_to_pass_parameters_mock] did not define a parameter named [x]. Did you misspell it in the expectation or forget it in the mock's argument list? -constraint_messages_tests.c: Failure: ConstraintMessage -> for_not_equal_to_double +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_not_equal_to_double Expected [0] to [not equal double] [0] within [8] significant figures actual value: [0.000000] expected value: [0.000000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_not_equal_to_double_negative +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_not_equal_to_double_negative Expected [-1] to [not equal double] [-1] within [8] significant figures actual value: [-1.000000] expected value: [-1.000000] -constraint_messages_tests.c: Failure: ConstraintMessage -> for_violated_never_expect +constraint_messages_tests.c:000: Failure: ConstraintMessage -> for_violated_never_expect Mocked function [some_mock] has an expectation that it will never be called, but it was -constraint_messages_tests.c: Exception: ConstraintMessage -> increments_exception_count_when_terminating_via_SIGQUIT +constraint_messages_tests.c:000: Exception: ConstraintMessage -> increments_exception_count_when_terminating_via_SIGQUIT Test terminated with signal: Quit -constraint_messages_tests.c: Exception: ConstraintMessage -> increments_exception_count_when_terminating_via_SIGTERM +constraint_messages_tests.c:000: Exception: ConstraintMessage -> increments_exception_count_when_terminating_via_SIGTERM Test terminated with signal: Terminated "ConstraintMessage": 35 failures, 2 exceptions in 0ms. 
diff -Nru cgreen-1.3.0/tests/constraint_tests.c cgreen-1.6.3/tests/constraint_tests.c --- cgreen-1.3.0/tests/constraint_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/constraint_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -61,7 +61,7 @@ Ensure(Constraint, parameter_name_matches_correctly) { Constraint *constraint = create_constraint(); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->parameter_name = "label"; assert_that(constraint_is_not_for_parameter(constraint, "wrong_label"), is_true); @@ -124,6 +124,47 @@ destroy_constraint(equals_string_hello_constraint); } +Ensure(Constraint, matching_beginning_of_string) { + Constraint *beginning_of_string_hello_constraint = + create_begins_with_string_constraint("Hell", "user_greeting"); + + assert_that(compare_string_constraint(beginning_of_string_hello_constraint, "Hello"), is_true); + assert_that(compare_string_constraint(beginning_of_string_hello_constraint, "Goodbye"), is_false); + + destroy_constraint(beginning_of_string_hello_constraint); +} + +Ensure(Constraint, not_matching_beginning_of_string) { + Constraint *beginning_of_string_hello_constraint = + create_does_not_begin_with_string_constraint("Goodby", "user_greeting"); + + assert_that(compare_string_constraint(beginning_of_string_hello_constraint, "Hello"), is_true); + assert_that(compare_string_constraint(beginning_of_string_hello_constraint, "Goodbye"), is_false); + + destroy_constraint(beginning_of_string_hello_constraint); +} + +Ensure(Constraint, matching_end_of_string) { + Constraint *end_of_string_hello_constraint = + create_ends_with_string_constraint("ello", "user_greeting"); + + assert_that(compare_string_constraint(end_of_string_hello_constraint, "Hello"), is_true); + assert_that(compare_string_constraint(end_of_string_hello_constraint, "Goodbye"), is_false); + assert_that(compare_string_constraint(end_of_string_hello_constraint, "Hello ello"), is_true); + + 
destroy_constraint(end_of_string_hello_constraint); +} + +Ensure(Constraint, not_matching_end_of_string) { + Constraint *end_of_string_hello_constraint = + create_does_not_end_with_string_constraint("oodbye", "user_greeting"); + + assert_that(compare_string_constraint(end_of_string_hello_constraint, "Hello"), is_true); + assert_that(compare_string_constraint(end_of_string_hello_constraint, "Goodbye"), is_false); + + destroy_constraint(end_of_string_hello_constraint); +} + Ensure(Constraint, matching_null_string_against_non_null_string) { Constraint *equals_string_hello_constraint = create_equal_to_string_constraint("Hello", "user_greeting"); @@ -243,14 +284,19 @@ TestSuite *suite = create_test_suite(); add_test_with_context(suite, Constraint, default_destroy_clears_state); add_test_with_context(suite, Constraint, parameter_name_matches_correctly); + add_test_with_context(suite, Constraint, compare_contents_is_correct_on_larger_than_intptr_array); add_test_with_context(suite, Constraint, compare_is_correct_when_using_integers); + add_test_with_context(suite, Constraint, compare_to_is_null_correctly); add_test_with_context(suite, Constraint, string_constraint_destroy_clears_state); add_test_with_context(suite, Constraint, matching_strings_as_equal); + add_test_with_context(suite, Constraint, matching_beginning_of_string); + add_test_with_context(suite, Constraint, not_matching_beginning_of_string); + add_test_with_context(suite, Constraint, matching_end_of_string); + add_test_with_context(suite, Constraint, not_matching_end_of_string); add_test_with_context(suite, Constraint, matching_null_string_against_non_null_string); add_test_with_context(suite, Constraint, matching_against_null_string); add_test_with_context(suite, Constraint, matching_doubles_as_equal_with_default_significance); add_test_with_context(suite, Constraint, matching_doubles_respects_significant_figure_setting); - add_test_with_context(suite, Constraint, 
compare_contents_is_correct_on_larger_than_intptr_array); add_test_with_context(suite, Constraint, compare_equal_to_contents_is_false_on_null); add_test_with_context(suite, Constraint, compare_not_equal_to_contents_is_false_on_null); add_test_with_context(suite, Constraint, can_compare_to_hex); diff -Nru cgreen-1.3.0/tests/custom_constraint_messages_tests.c cgreen-1.6.3/tests/custom_constraint_messages_tests.c --- cgreen-1.3.0/tests/custom_constraint_messages_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/custom_constraint_messages_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -21,7 +21,7 @@ } Constraint static_is_bigger_than_5 = { - /* .type */ VALUE_COMPARER, + /* .type */ CGREEN_VALUE_COMPARER_CONSTRAINT, /* .name */ "be bigger than 5", /* .destroy */ destroy_static_constraint, /* .compare */ compare_want_greater_than_5, @@ -59,7 +59,7 @@ constraint->expected_value = make_cgreen_integer_value(expected_value); constraint->expected_value_name = string_dup(expected_value_name); - constraint->type = VALUE_COMPARER; + constraint->type = CGREEN_VALUE_COMPARER_CONSTRAINT; constraint->compare = &compare_want_smaller_value; constraint->execute = &test_want; @@ -81,19 +81,19 @@ Using custom data types. 
*/ -typedef struct Box { - int id; +typedef struct Struct { + char id; int size; -} Box; +} Struct; typedef struct Piece { - int id; + char id; int size; } Piece; bool compare_piece_and_box_size(Constraint *constraint, CgreenValue actual) { return ((Piece *)actual.value.pointer_value)->size - < ((Box*)constraint->expected_value.value.pointer_value)->size ; + < ((Struct*)constraint->expected_value.value.pointer_value)->size ; } static void test_fit_piece(Constraint *constraint, const char *function_name, CgreenValue actual, @@ -105,7 +105,7 @@ (*constraint->compare)(constraint, actual), "Piece [%f], does not fit in [%f] in function [%s] parameter [%s]", ((Piece *)constraint->expected_value.value.pointer_value)->id, - ((Box *)actual.value.pointer_value)->id, + ((Struct *)actual.value.pointer_value)->id, function_name, constraint->parameter_name); } @@ -115,7 +115,7 @@ constraint->expected_value = make_cgreen_pointer_value((void*)expected_value); constraint->expected_value_name = string_dup(expected_value_name); - constraint->type = CONTENT_COMPARER; + constraint->type = CGREEN_CONTENT_COMPARER_CONSTRAINT; constraint->compare = &compare_piece_and_box_size; constraint->execute = &test_fit_piece; @@ -127,7 +127,7 @@ #define can_fit_in_box(box) create_piece_fit_in_box_constraint((intptr_t)box, #box) Ensure(CustomConstraint, more_complex_custom_constraint_function) { - Box box1 = {.id = 1, .size = 5}; - Piece piece99 = {.id = 99, .size = 6}; + Struct box1 = {.id = (char)1, .size = 5}; + Piece piece99 = {.id = (char)99, .size = 6}; assert_that(&piece99, can_fit_in_box(&box1)); } diff -Nru cgreen-1.3.0/tests/custom_constraint_messages_tests.expected cgreen-1.6.3/tests/custom_constraint_messages_tests.expected --- cgreen-1.3.0/tests/custom_constraint_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/custom_constraint_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,11 +1,11 @@ Running "custom_constraint_messages_tests" (3 
tests)... -custom_constraint_messages_tests.c: Failure: CustomConstraint -> custom_constraint_using_a_function_with_arguments_function +custom_constraint_messages_tests.c:000: Failure: CustomConstraint -> custom_constraint_using_a_function_with_arguments_function Expected [19] to [be smaller than] [10] -custom_constraint_messages_tests.c: Failure: CustomConstraint -> custom_constraint_using_static_function +custom_constraint_messages_tests.c:000: Failure: CustomConstraint -> custom_constraint_using_static_function Expected [1] to [be bigger than 5] -custom_constraint_messages_tests.c: Failure: CustomConstraint -> more_complex_custom_constraint_function +custom_constraint_messages_tests.c:000: Failure: CustomConstraint -> more_complex_custom_constraint_function Expected [&piece99] to [fit in box] [&box1] at offset: [0] actual value: [0x63] diff -Nru cgreen-1.3.0/tests/cute_reporter_tests.c cgreen-1.6.3/tests/cute_reporter_tests.c --- cgreen-1.3.0/tests/cute_reporter_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/cute_reporter_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -36,7 +36,7 @@ static int mocked_vprintf(const char *format, va_list arguments) { char buffer[10000]; - vsprintf(buffer, format, arguments); + vsnprintf(buffer, sizeof(buffer), format, arguments); output = concat(output, buffer); return strlen(output); diff -Nru cgreen-1.3.0/tests/failure_messages_tests.expected cgreen-1.6.3/tests/failure_messages_tests.expected --- cgreen-1.3.0/tests/failure_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/failure_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,8 +1,8 @@ Running "failure_messages_tests" (2 tests)... 
-failure_messages_tests.c: Exception: FailureMessage -> for_CGREEN_PER_TEST_TIMEOUT +failure_messages_tests.c:000: Exception: FailureMessage -> for_CGREEN_PER_TEST_TIMEOUT Test terminated unexpectedly, likely from a non-standard exception or Posix signal -failure_messages_tests.c: Exception: FailureMessage -> for_time_out_in_only_one_second +failure_messages_tests.c:000: Exception: FailureMessage -> for_time_out_in_only_one_second Test terminated unexpectedly, likely from a non-standard exception or Posix signal "FailureMessage": 1 pass, 2 exceptions in 0ms. diff -Nru cgreen-1.3.0/tests/ignore_messages_tests.expected cgreen-1.6.3/tests/ignore_messages_tests.expected --- cgreen-1.3.0/tests/ignore_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/ignore_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,8 +1,8 @@ Running "ignore_messages_tests" (5 tests)... -ignore_messages_tests.c: Exception: IgnoreMessage -> should_not_count_exceptions_as_ignored +ignore_messages_tests.c:000: Exception: IgnoreMessage -> should_not_count_exceptions_as_ignored Test terminated with signal: Segmentation fault -ignore_messages_tests.c: Failure: IgnoreMessage -> should_not_count_failing_tests_as_ignored +ignore_messages_tests.c:000: Failure: IgnoreMessage -> should_not_count_failing_tests_as_ignored Expected [0] to [be true] "IgnoreMessage": 1 pass, 1 skipped, 1 failure, 1 exception in 0ms. 
diff -Nru cgreen-1.3.0/tests/libxml_output_tests.c cgreen-1.6.3/tests/libxml_output_tests.c --- cgreen-1.3.0/tests/libxml_output_tests.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/libxml_output_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,9 @@ +#include + +Ensure(failing_test_is_listed_by_libxml_reporter) { + assert_that(false); +} + +Ensure(passing_test_is_listed_by_libxml_reporter) { + assert_that(true); +} diff -Nru cgreen-1.3.0/tests/libxml_output_tests.expected cgreen-1.6.3/tests/libxml_output_tests.expected --- cgreen-1.3.0/tests/libxml_output_tests.expected 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/libxml_output_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru cgreen-1.3.0/tests/libxml_reporter_tests.c cgreen-1.6.3/tests/libxml_reporter_tests.c --- cgreen-1.3.0/tests/libxml_reporter_tests.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/libxml_reporter_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,395 @@ +#include +#include +#include +#include "cgreen_value_internal.h" +#include "constraint_internal.h" + +#include +#include +#include +#include + +#include + +#ifdef __cplusplus +using namespace cgreen; +#endif + + +#include "libxml_reporter_internal.h" + +#define XMLSTRING(x) (BAD_CAST x) + +static const int line=666; +static xmlChar *output = NULL; + +static void clear_output(void) +{ + if (NULL != output) { + xmlFree(output); + } + output = NULL; +} + +static int mocked_xmlout(xmlDocPtr doc) { + if (output) { + xmlFree(output); + } + + xmlDocDumpMemoryEnc(doc, &output, NULL, "UTF-8"); + return 0; +} + +static TestReporter *reporter; + +static void setup_xml_reporter_tests(void) { + reporter = create_libxml_reporter("PREFIX"); + + // We can not use setup_reporting() since we are running + // inside a test suite which needs the real reporting + // So we'll have to set up the messaging explicitly + reporter->ipc = start_cgreen_messaging(667); + + 
clear_output(); + set_libxml_reporter_printer(reporter, mocked_xmlout); +} + +static void teardown_xml_reporter_tests(void) { + //bad mojo when running tests in same process, as destroy_reporter also sets + //context.reporter = NULL, thus breaking the next test to run + destroy_reporter(reporter); + if (NULL != output) { + free(output); + //need to set output to NULL to avoid second free in + //subsequent call to setup_xml_reporter_tests->clear_output + //when running tests in same process + output = NULL; + } +} + +static xmlChar* getAttribute(xmlNodePtr node, const xmlChar* name) { + xmlAttr *attr = node->properties; + while (attr) { + if (xmlStrEqual(attr->name, name)) { + return xmlNodeGetContent((xmlNode*)attr); + } + attr = attr->next; + } + + return NULL; +} + +static bool hasAttribute(xmlNodePtr node, const xmlChar* name) { + xmlAttr *attr = node->properties; + while (attr) { + if (xmlStrEqual(attr->name, name)) { + return true; + } + attr = attr->next; + } + + return false; +} + +struct xmlNode_has_attribute_equal_to { + xmlChar *attr, *value; +}; + +static bool compare_xmlNode_has_attribute_equal_to(Constraint *constraint, + CgreenValue actual) { + xmlNodePtr actualNode = (xmlNodePtr)actual.value.pointer_value; + struct xmlNode_has_attribute_equal_to *expected = + (struct xmlNode_has_attribute_equal_to*)constraint->expected_value.value.pointer_value; + xmlChar* actualValue = getAttribute(actualNode, expected->attr); + + bool ret = xmlStrEqual(actualValue, expected->value); + xmlFree(actualValue); + return ret; +} + +static char *failure_message_xmlNode_has_attribute_equal_to( + Constraint *constraint, const char *actual_string, intptr_t actual_value) { + struct xmlNode_has_attribute_equal_to *expected = + (struct xmlNode_has_attribute_equal_to*)constraint->expected_value.value.pointer_value; + xmlChar* actualValue = getAttribute((xmlNodePtr)actual_value, expected->attr); + + const char *message_template = "Expected attribute [%s] of [%s] to [equal] 
[%s]\n" + "\tactual value:\t\t[%s]\n" + "\texpected to equal:\t[%s]\n"; + size_t msglen = xmlStrlen(expected->attr) + strlen(actual_string) + + strlen(constraint->expected_value_name) + xmlStrlen(actualValue) + + xmlStrlen(expected->value) + strlen(message_template); + char *message = (char*)malloc(msglen); + if (!message) { + xmlFree(actualValue); + return NULL; + } + + if (snprintf(message, msglen, message_template, + expected->attr, actual_string, constraint->expected_value_name, + actualValue, expected->value) >= (ssize_t)msglen) { + xmlFree(actualValue); + free(message); + return NULL; + } + + xmlFree(actualValue); + return message; +} + +static void destroy_xmlNode_has_attribute_equal_to(Constraint* constraint) { + struct xmlNode_has_attribute_equal_to *expected = + (struct xmlNode_has_attribute_equal_to*)constraint->expected_value.value.pointer_value; + xmlFree(expected->attr); + xmlFree(expected->value); + free(expected); + destroy_empty_constraint(constraint); +} + +static Constraint *create_xmlNode_has_attribute_equal_to(const xmlChar* attr, + const xmlChar *value, + const char* expected_value_name) +{ + Constraint *constraint = create_constraint(); + if (!constraint) + return NULL; + + struct xmlNode_has_attribute_equal_to *expected = + (struct xmlNode_has_attribute_equal_to *)malloc(sizeof(struct xmlNode_has_attribute_equal_to)); + if (!expected) { + constraint->destroy(constraint); + return NULL; + } + + expected->attr = xmlStrdup(attr); + expected->value = xmlStrdup(value); + + constraint->expected_value = make_cgreen_pointer_value(expected); + constraint->expected_value_name = strdup(expected_value_name); + constraint->type = CGREEN_STRING_COMPARER_CONSTRAINT; + + constraint->compare = &compare_xmlNode_has_attribute_equal_to; + constraint->failure_message = &failure_message_xmlNode_has_attribute_equal_to; + constraint->name = "have attribute with value"; + constraint->size_of_expected_value = sizeof(intptr_t); + constraint->destroy = 
&destroy_xmlNode_has_attribute_equal_to; + + return constraint; +} + +#define xmlnode_has_attribute_equal_to(attr, value) \ + create_xmlNode_has_attribute_equal_to(XMLSTRING(attr), XMLSTRING(value), #value) + +static xmlNodePtr xmlnode_find_sibling(xmlNodePtr node, const char *name) +{ + while (node) { + if (xmlStrEqual(node->name, XMLSTRING(name))) + break; + node = xmlNextElementSibling(node); + } + return node; +} + +Describe(LibXmlReporter); +BeforeEach(LibXmlReporter) { + setup_xml_reporter_tests(); +} +AfterEach(LibXmlReporter) { + teardown_xml_reporter_tests(); +} + + +Ensure(LibXmlReporter, will_report_beginning_of_suite) { + reporter->start_suite(reporter, "suite_name", 2); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + assert_that(doc, is_not_null); + + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + assert_that(testsuite->name, is_equal_to_string("testsuite")); + assert_that(xmlChildElementCount(testsuite), is_equal_to(0)); + assert_that(testsuite, xmlnode_has_attribute_equal_to("name", "suite_name")); + xmlFreeDoc(doc); +} + + +static void reporter_show_pass_vargs(TestReporter *reporter, const char *name, int line, + const char *format, ...) 
+{ + va_list vargs; + va_start(vargs, format); + reporter->show_pass(reporter, name, line, format, vargs); + va_end(vargs); +} + +Ensure(LibXmlReporter, will_report_beginning_and_successful_finishing_of_passing_test) { + reporter->start_suite(reporter, "suite_name", 2); + reporter->start_test(reporter, "test_name"); + reporter_show_pass_vargs(reporter, "file", 2, "test_name"); + send_reporter_completion_notification(reporter); + reporter->finish_test(reporter, "filename", line, NULL); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + assert_that(doc, is_not_null); + + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + assert_that(xmlChildElementCount(testsuite), is_equal_to(1)); + + xmlNodePtr testcase = xmlFirstElementChild(testsuite); + assert_that(testcase->name, is_equal_to_string("testcase")); + assert_that(xmlChildElementCount(testcase), is_equal_to(0)); + assert_that(testcase, xmlnode_has_attribute_equal_to("name", "test_name")); + assert_that(testcase, xmlnode_has_attribute_equal_to("classname", "suite_name")); +} + + +static void reporter_show_fail_vargs(TestReporter *reporter, const char *name, int line, + const char *format, ...) 
+{ + va_list vargs; + va_start(vargs, format); + reporter->show_fail(reporter, name, line, format, vargs); + va_end(vargs); +} + +Ensure(LibXmlReporter, will_report_a_failing_test) { + reporter->start_suite(reporter, "suite_name", 2); + reporter->start_test(reporter, "test_name"); + reporter_show_fail_vargs(reporter, "file", 2, "test_name"); + send_reporter_completion_notification(reporter); + reporter->finish_test(reporter, "filename", line, NULL); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + assert_that(doc, is_not_null); + + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + assert_that(xmlChildElementCount(testsuite), is_equal_to(1)); + + xmlNodePtr testcase = xmlFirstElementChild(testsuite); + assert_that(testcase->name, is_equal_to_string("testcase")); + assert_that(xmlChildElementCount(testcase), is_equal_to(1)); + assert_that(hasAttribute(testcase, XMLSTRING("time")), is_true); + + xmlNodePtr failure = xmlFirstElementChild(testcase); + assert_that(failure->name, is_equal_to_string("failure")); + assert_that(xmlChildElementCount(failure), is_equal_to(1)); + assert_that(failure, xmlnode_has_attribute_equal_to("message", "test_name")); + + xmlNodePtr location = xmlFirstElementChild(failure); + assert_that(location->name, is_equal_to_string("location")); + assert_that(xmlChildElementCount(location), is_equal_to(0)); + assert_that(location, xmlnode_has_attribute_equal_to("file", "file")); + assert_that(location, xmlnode_has_attribute_equal_to("line", "2")); +} + +Ensure(LibXmlReporter, will_report_a_failing_test_only_once) { + va_list null_arguments; + memset(&null_arguments, 0, sizeof(null_arguments)); + + reporter->start_suite(reporter, "suite_name", 2); + reporter->start_test(reporter, "test_name"); + reporter->show_fail(reporter, "file", 2, "test_failure_message", null_arguments); + reporter->show_fail(reporter, "file", 2, "other_message", null_arguments); + reporter->finish_test(reporter, "filename", 
line, NULL); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + xmlNodePtr testcase = xmlFirstElementChild(testsuite); + + static const char FAILURE[] = "failure"; + xmlNodePtr failure = xmlnode_find_sibling(xmlFirstElementChild(testcase), FAILURE); + + assert_that(failure->name, is_equal_to_string(FAILURE)); + assert_that(failure, xmlnode_has_attribute_equal_to("message", "test_failure_message")); + + failure = xmlnode_find_sibling(xmlNextElementSibling(failure), FAILURE); + assert_that(failure, xmlnode_has_attribute_equal_to("message", "other_message")); + + failure = xmlnode_find_sibling(xmlNextElementSibling(failure), FAILURE); + assert_that(failure, is_null); +} + +Ensure(LibXmlReporter, will_mark_ignored_test_as_skipped) { + const int line = 666; + + reporter->start_suite(reporter, "suite_name", 1); + reporter->start_test(reporter, "skipped_test_name"); + send_reporter_skipped_notification(reporter); + reporter->finish_test(reporter, "filename", line, "message"); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + assert_that(doc, is_not_null); + + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + assert_that(xmlChildElementCount(testsuite), is_equal_to(1)); + + xmlNodePtr testcase = xmlFirstElementChild(testsuite); + assert_that(testcase->name, is_equal_to_string("testcase")); + assert_that(xmlChildElementCount(testcase), is_equal_to(1)); + assert_that(hasAttribute(testcase, XMLSTRING("time")), is_true); + + xmlNodePtr skipped = xmlFirstElementChild(testcase); + assert_that(skipped->name, is_equal_to_string("skipped")); + assert_that(xmlChildElementCount(skipped), is_equal_to(0)); +} + + +Ensure(LibXmlReporter, will_report_non_finishing_test) { + const int line = 666; + + reporter->start_suite(reporter, "suite_name", 1); + reporter->start_test(reporter, "test_name"); + send_reporter_exception_notification(reporter); 
+ reporter->finish_test(reporter, "filename", line, "message"); + reporter->finish_suite(reporter, "filename", line); + + xmlDocPtr doc = xmlParseDoc(output); + assert_that(doc, is_not_null); + + xmlNodePtr testsuite = xmlDocGetRootElement(doc); + assert_that(xmlChildElementCount(testsuite), is_equal_to(1)); + + xmlNodePtr testcase = xmlFirstElementChild(testsuite); + assert_that(testcase->name, is_equal_to_string("testcase")); + assert_that(xmlChildElementCount(testcase), is_equal_to(1)); + assert_that(hasAttribute(testcase, XMLSTRING("time")), is_true); + + xmlNodePtr error = xmlFirstElementChild(testcase); + assert_that(error->name, is_equal_to_string("error")); + assert_that(error, xmlnode_has_attribute_equal_to("type", "Fatal")); + assert_that(error, xmlnode_has_attribute_equal_to("message", "message")); +} + +Ensure(LibXmlReporter, will_report_time_correctly_for_non_finishing_test) { + const int line = 666; + + reporter->start_suite(reporter, "suite_name", 1); + reporter->start_test(reporter, "test_name"); + send_reporter_exception_notification(reporter); + reporter->finish_test(reporter, "filename", line, "message"); + reporter->finish_suite(reporter, "filename", line); + + assert_that(output, contains_string("name=\"test_name\"")); + assert_that(output, contains_string(" time=\"")); +} + +TestSuite *libxml_reporter_tests(void) { + TestSuite *suite = create_test_suite(); + set_setup(suite, setup_xml_reporter_tests); + + add_test_with_context(suite, LibXmlReporter, will_report_beginning_of_suite); + add_test_with_context(suite, LibXmlReporter, will_report_beginning_and_successful_finishing_of_passing_test); + add_test_with_context(suite, LibXmlReporter, will_report_a_failing_test); + add_test_with_context(suite, LibXmlReporter, will_report_a_failing_test_only_once); + add_test_with_context(suite, LibXmlReporter, will_mark_ignored_test_as_skipped); + add_test_with_context(suite, LibXmlReporter, will_report_non_finishing_test); + add_test_with_context(suite, 
LibXmlReporter, will_report_time_correctly_for_non_finishing_test); + + set_teardown(suite, teardown_xml_reporter_tests); + return suite; +} diff -Nru cgreen-1.3.0/tests/libxml_reporter_tests.cpp cgreen-1.6.3/tests/libxml_reporter_tests.cpp --- cgreen-1.3.0/tests/libxml_reporter_tests.cpp 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/libxml_reporter_tests.cpp 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,12 @@ +/* + This file used to be a link to the corresponding .c file because we + want to compile the same tests for C and C++. But since some systems + don't handle symbolic links the same way as *ix systems we get + inconsistencies (looking at you Cygwin) or plain out wrong (looking + at you MSYS2, copying ?!?!?) behaviour. + + So we will simply include the complete .c source instead... + */ + +#include "libxml_reporter_tests.c" + diff -Nru cgreen-1.3.0/tests/messaging_tests.c cgreen-1.6.3/tests/messaging_tests.c --- cgreen-1.3.0/tests/messaging_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/messaging_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -41,7 +41,7 @@ } Ensure(failure_reported_and_exception_thrown_when_messaging_would_block) { - const int LOOPS = 65536; + const int LOOPS = 65537; int messaging = start_cgreen_messaging(33); int loop; char panic_message[1000]; diff -Nru cgreen-1.3.0/tests/mock_messages_tests.c cgreen-1.6.3/tests/mock_messages_tests.c --- cgreen-1.3.0/tests/mock_messages_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/mock_messages_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -57,6 +57,11 @@ assert_that(integer_out(), is_equal_to(3)); } +static void unexpected_mock(void) { mock(); } + +Ensure(Mocks, lists_unexpected_mock_calls) { unexpected_mock(); } + + // These are tentative solutions to mocks returning doubles and // trapping type errors when they do. 
diff -Nru cgreen-1.3.0/tests/mock_messages_tests.expected cgreen-1.6.3/tests/mock_messages_tests.expected --- cgreen-1.3.0/tests/mock_messages_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/mock_messages_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -1,68 +1,71 @@ -Running "mock_messages_tests" (20 tests)... -mock_messages_tests.c: Failure: Mocks -> calls_beyond_expected_sequence_fail_when_mocks_are_strict +Running "mock_messages_tests" (21 tests)... +mock_messages_tests.c:000: Failure: Mocks -> calls_beyond_expected_sequence_fail_when_mocks_are_strict Mocked function [integer_out] was called too many times -mock_messages_tests.c: Failure: Mocks -> calls_beyond_expected_sequence_fail_when_mocks_are_strict +mock_messages_tests.c:000: Failure: Mocks -> calls_beyond_expected_sequence_fail_when_mocks_are_strict Expected [integer_out()] to [equal] [3] actual value: [0] expected value: [3] -mock_messages_tests.c: Failure: Mocks -> can_declare_function_never_called +mock_messages_tests.c:000: Failure: Mocks -> can_declare_function_never_called Mocked function [sample_mock] has an expectation that it will never be called, but it was -mock_messages_tests.c: Failure: Mocks -> constraint_number_of_calls_out_of_order_expectations_fail +mock_messages_tests.c:000: Failure: Mocks -> constraint_number_of_calls_out_of_order_expectations_fail Expected [[first] parameter in [simple_mocked_function]] to [equal] [1] actual value: [2] expected value: [1] -mock_messages_tests.c: Failure: Mocks -> constraint_number_of_calls_out_of_order_expectations_fail +mock_messages_tests.c:000: Failure: Mocks -> constraint_number_of_calls_out_of_order_expectations_fail Expected [[first] parameter in [simple_mocked_function]] to [equal] [2] actual value: [1] expected value: [2] -mock_messages_tests.c: Failure: Mocks -> constraint_number_of_calls_when_not_called_enough_times +mock_messages_tests.c:000: Failure: Mocks -> 
constraint_number_of_calls_when_not_called_enough_times Expected [simple_mocked_function] to [be called] [times] actual value: [1] expected to have been called: [2] times -mock_messages_tests.c: Failure: Mocks -> failure_reported_when_expect_after_always_expect_for_same_function +mock_messages_tests.c:000: Failure: Mocks -> failure_reported_when_expect_after_always_expect_for_same_function Mocked function [integer_out] already has an expectation that it will always be called a certain way; any expectations declared after an always expectation are invalid -mock_messages_tests.c: Failure: Mocks -> failure_reported_when_expect_after_never_expect_for_same_function +mock_messages_tests.c:000: Failure: Mocks -> failure_reported_when_expect_after_never_expect_for_same_function Mocked function [integer_out] already has an expectation that it will never be called; any expectations declared after a never call expectation are invalid -mock_messages_tests.c: Failure: Mocks -> failure_when_no_presets_for_default_strict_mock +mock_messages_tests.c:000: Failure: Mocks -> failure_when_no_presets_for_default_strict_mock Mocked function [integer_out] did not have an expectation that it would be called -mock_messages_tests.c: Failure: Mocks -> reports_always_expect_after_never_expect_for_same_function +mock_messages_tests.c:000: Failure: Mocks -> lists_unexpected_mock_calls + Mocked function [unexpected_mock] did not have an expectation that it would be called + +mock_messages_tests.c:000: Failure: Mocks -> reports_always_expect_after_never_expect_for_same_function Mocked function [integer_out] already has an expectation that it will never be called; any expectations declared after a never call expectation are discarded -mock_messages_tests.c: Failure: Mocks -> reports_multiple_always_expect +mock_messages_tests.c:000: Failure: Mocks -> reports_multiple_always_expect Mocked function [integer_out] already has an expectation and will always be called a certain way; any expectations 
declared after an always expectation are discarded -mock_messages_tests.c: Failure: Mocks -> reports_multiple_never_expect +mock_messages_tests.c:000: Failure: Mocks -> reports_multiple_never_expect Mocked function [integer_out] already has an expectation that it will never be called; declaring an expectation for a function after a never call expectation is not allowed -mock_messages_tests.c: Failure: Mocks -> reports_never_expect_after_always_expect_for_same_function +mock_messages_tests.c:000: Failure: Mocks -> reports_never_expect_after_always_expect_for_same_function Mocked function [integer_out] already has an expectation and will always be called a certain way; declaring an expectation after an always expectation is not allowed -mock_messages_tests.c: Failure: Mocks -> should_detect_two_unfulfilled_expectations_on_unknown_functions +mock_messages_tests.c:000: Failure: Mocks -> should_detect_two_unfulfilled_expectations_on_unknown_functions Expected call was not made to mocked function [f1] -mock_messages_tests.c: Failure: Mocks -> should_detect_two_unfulfilled_expectations_on_unknown_functions +mock_messages_tests.c:000: Failure: Mocks -> should_detect_two_unfulfilled_expectations_on_unknown_functions Expected call was not made to mocked function [f2] -mock_messages_tests.c: Failure: Mocks -> should_detect_two_unfulfilled_expectations_without_constraints_on_unknown_functions +mock_messages_tests.c:000: Failure: Mocks -> should_detect_two_unfulfilled_expectations_without_constraints_on_unknown_functions Expected call was not made to mocked function [f1] -mock_messages_tests.c: Failure: Mocks -> should_detect_two_unfulfilled_expectations_without_constraints_on_unknown_functions +mock_messages_tests.c:000: Failure: Mocks -> should_detect_two_unfulfilled_expectations_without_constraints_on_unknown_functions Expected call was not made to mocked function [f2] -mock_messages_tests.c: Failure: Mocks -> single_uncalled_expectation_fails_tally 
+mock_messages_tests.c:000: Failure: Mocks -> single_uncalled_expectation_fails_tally Expected call was not made to mocked function [string_out] - "Mocks": 5 passes, 2 skipped, 18 failures in 0ms. -Completed "mock_messages_tests": 5 passes, 2 skipped, 18 failures in 0ms. + "Mocks": 5 passes, 2 skipped, 19 failures in 0ms. +Completed "mock_messages_tests": 5 passes, 2 skipped, 19 failures in 0ms. Mocks -> can_learn_double_expects : Learned mocks are expect(double_in, when(in, is_equal_to_double(3.140000))); Mocks -> learning_mocks_emit_none_when_learning_no_mocks : Learned mocks are diff -Nru cgreen-1.3.0/tests/mocks_struct_tests.c cgreen-1.6.3/tests/mocks_struct_tests.c --- cgreen-1.3.0/tests/mocks_struct_tests.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/mocks_struct_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,67 @@ +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +using namespace cgreen; + +namespace cgreen { + extern "C" { + extern CgreenValue make_cgreen_pointer_value(void *ptr); + } +} + +#else +extern CgreenValue make_cgreen_pointer_value(void *ptr); +#endif + +Describe(MockStruct); +BeforeEach(MockStruct) {} +AfterEach(MockStruct) {} + +typedef struct { + int i; + const char *string; +} Struct; + + +/* If you are only interested in a single, or few, fields: */ +void function_mocking_field(Struct s) { + mock(s.i); +} + +Ensure(MockStruct, can_mock_a_struct_parameters_field) { + Struct struct_to_send = { .i = 12, .string = "hello" }; + + expect(function_mocking_field, when(s.i, is_equal_to(12))); + + function_mocking_field(struct_to_send); +} + +void *cgreen_memdup(void *s, size_t size) { + void *p = malloc(size); + memcpy(p, s, size); + return p; +} + +#define memdup(s) cgreen_memdup(&s, sizeof(s)) + +/* If you need to get the whole struct: */ +void function_mocking_the_whole_struct(Struct s) { + Struct *sP = (Struct *)memdup(s); + mock(sP); +} + +Ensure(MockStruct, can_mock_a_struct_parameter) { + 
Struct struct_to_send = { .i = 13, .string = "hello" }; + Struct *p; + + expect(function_mocking_the_whole_struct, will_capture_parameter(sP, p)); + + function_mocking_the_whole_struct(struct_to_send); + + assert_that(p->i, is_equal_to(13)); +} diff -Nru cgreen-1.3.0/tests/mocks_struct_tests.cpp cgreen-1.6.3/tests/mocks_struct_tests.cpp --- cgreen-1.3.0/tests/mocks_struct_tests.cpp 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/mocks_struct_tests.cpp 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,11 @@ +/* + This file used to be a link to the corresponding .c file because we + want to compile the same tests for C and C++. But since some systems + don't handle symbolic links the same way as *ix systems we get + inconsistencies (looking at you Cygwin) or plain out wrong (looking + at you MSYS2, copying ?!?!?) behaviour. + + So we will simply include the complete .c source instead... + */ + +#include "mocks_struct_tests.c" diff -Nru cgreen-1.3.0/tests/mocks_tests.c cgreen-1.6.3/tests/mocks_tests.c --- cgreen-1.3.0/tests/mocks_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/mocks_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -139,6 +139,46 @@ string_in("anything"); } +static void string_in2(const char *s2) { + mock(s2); +} + +Ensure(Mocks, expecting_twice_with_non_null_constraint_should_not_mix_them_up) { + expect(string_in, when(s, is_not_null)); + expect(string_in2, when(s2, is_not_null)); + string_in("anything"); + string_in2("anything"); +} + +Ensure(Mocks, expecting_twice_with_is_null_constraint_should_not_mix_them_up) { + expect(string_in, when(s, is_null)); + expect(string_in2, when(s2, is_null)); + string_in(NULL); + string_in2(NULL); +} + + +static void bool_in(bool b) { + mock(b); +} +static void bool_in2(bool b2) { + mock(b2); +} + +Ensure(Mocks, expecting_twice_with_is_true_constraint_should_not_mix_them_up) { + expect(bool_in, when(b, is_true)); + expect(bool_in2, when(b2, is_true)); + bool_in(true); + bool_in2(true); +} + 
+Ensure(Mocks, expecting_twice_with_is_false_constraint_should_not_mix_them_up) { + expect(bool_in, when(b, is_false)); + expect(bool_in2, when(b2, is_false)); + bool_in(false); + bool_in2(false); +} + static void double_in(double d) { mock(box_double(d)); } @@ -325,6 +365,19 @@ assert_that(&local, is_equal_to_contents_of(&actual, sizeof(LargerThanIntptr))); } +static void mocked_read(char *ch) { + mock(ch); +} + +Ensure(Mocks, can_stub_a_char_out_parameter) { + char stubbed_char = 'a'; + char returned_char; + expect(mocked_read, + will_set_contents_of_parameter(ch, &stubbed_char, 1)); + mocked_read(&returned_char); + assert_that(returned_char, is_equal_to(stubbed_char)); +} + // function which when mocked will be referred to by preprocessor macro static void function_macro_mock(void) { mock(); @@ -380,13 +433,25 @@ simple_mocked_function(1, 2); } -static int sideeffect_changed = 1; +Ensure(Mocks, can_capture_parameter) { + int captured_first = 0; + int captured_second = 0; + + expect(simple_mocked_function, + will_capture_parameter(first, captured_first), + will_capture_parameter(second, captured_second)); + simple_mocked_function(0x12345678, 0x76543210); + assert_that(captured_first, is_equal_to_hex(0x12345678)); + assert_that(captured_second, is_equal_to_hex(0x76543210)); +} + +static int changed_by_sideeffect = 1; static int mock_with_side_effect(void) { return (int)mock(); } static void the_sideeffect(void * data) { assert_that(*(int*)data, is_equal_to(99)); - sideeffect_changed = 2; + changed_by_sideeffect = 2; } Ensure(Mocks, mock_expect_with_side_effect) { @@ -397,49 +462,56 @@ assert_that(mock_with_side_effect(), is_equal_to(22)); - assert_that(sideeffect_changed, is_equal_to(2)); + assert_that(changed_by_sideeffect, is_equal_to(2)); } -typedef struct Box { +typedef struct Struct { int height; int weight; -} Box; +} Struct; -Box retrieveBox(void) { - return *(Box *)mock(); +Struct retrieveStruct(void) { + Struct *struct_p = (Struct *)mock(); + Struct 
the_struct = *struct_p; + free(struct_p); + return the_struct; } Ensure(Mocks, can_return_by_value) { - Box someBox = {.height = 10, .weight = 20}; - expect(retrieveBox, will_return_by_value(someBox, sizeof(Box))); - someBox.height = 30; - - Box actualBox = retrieveBox(); - assert_that(actualBox.weight, is_equal_to(20)); - assert_that(actualBox.height, is_equal_to(10)); + Struct someStruct = {.height = 10, .weight = 20}; + expect(retrieveStruct, will_return_by_value(someStruct, sizeof(Struct))); + someStruct.height = 30; + + Struct actualStruct = retrieveStruct(); + assert_that(actualStruct.weight, is_equal_to(20)); + assert_that(actualStruct.height, is_equal_to(10)); } -Box retrieveSpecialBox(int boxNumber) { - return *(Box *)mock(boxNumber); +Struct retrieveSpecialStruct(int structNumber) { + Struct *struct_p = (Struct *)mock(structNumber); + Struct the_struct = *struct_p; + free(struct_p); + return the_struct; } Ensure(Mocks, can_return_by_value_depending_on_input_parameter) { - Box box1 = {.height = 10, .weight = 20}; - Box box2 = {.height = 5, .weight = 33}; - expect(retrieveSpecialBox, will_return_by_value(box1, sizeof(Box)), when(boxNumber, is_equal_to(1))); - expect(retrieveSpecialBox, will_return_by_value(box2, sizeof(Box)), when(boxNumber, is_equal_to(2))); - box1.height = 30; - - Box retrievedBox1 = retrieveSpecialBox(1); - assert_that(retrievedBox1.weight, is_equal_to(20)); - assert_that(retrievedBox1.height, is_equal_to(10)); - Box retrievedBox2 = retrieveSpecialBox(2); - assert_that(retrievedBox2.weight, is_equal_to(33)); - assert_that(retrievedBox2.height, is_equal_to(5)); + Struct struct1 = {.height = 10, .weight = 20}; + Struct struct2 = {.height = 5, .weight = 33}; + expect(retrieveSpecialStruct, will_return_by_value(struct1, sizeof(Struct)), + when(structNumber, is_equal_to(1))); + expect(retrieveSpecialStruct, will_return_by_value(struct2, sizeof(Struct)), + when(structNumber, is_equal_to(2))); + struct1.height = 30; + + Struct retrievedStruct1 
= retrieveSpecialStruct(1); + assert_that(retrievedStruct1.weight, is_equal_to(20)); + assert_that(retrievedStruct1.height, is_equal_to(10)); + Struct retrievedStruct2 = retrieveSpecialStruct(2); + assert_that(retrievedStruct2.weight, is_equal_to(33)); + assert_that(retrievedStruct2.height, is_equal_to(5)); } - TestSuite *mock_tests(void) { TestSuite *suite = create_test_suite(); add_test_with_context(suite, Mocks, default_return_value_when_no_presets_for_loose_mock); diff -Nru cgreen-1.3.0/tests/normalize_libxml_output_tests.sed cgreen-1.6.3/tests/normalize_libxml_output_tests.sed --- cgreen-1.3.0/tests/normalize_libxml_output_tests.sed 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tests/normalize_libxml_output_tests.sed 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,6 @@ +# 'time="?.?????"' => time="0.00000" +s/time=".+"/time="0.00000"/g +s/line=".+"/line="0"/g +# filenames, suites, libraries and "classnames" starting with "lib" or "cyg" +s/"lib/"/g +s/"cyg/"/g diff -Nru cgreen-1.3.0/tests/normalize_xml_output_tests.sed cgreen-1.6.3/tests/normalize_xml_output_tests.sed --- cgreen-1.3.0/tests/normalize_xml_output_tests.sed 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/normalize_xml_output_tests.sed 2023-10-03 15:16:52.000000000 +0000 @@ -1,6 +1,7 @@ # 'time="?.?????"' => time="0.00000" s/time=".+"/time="0.00000"/g s/line=".+"/line="0"/g -# filenames, suites, libraries and "classnames" starting with "lib" or "cyg" +# filenames, suites, libraries and "classnames" starting with "lib", "cyg" or ... 
s/"lib/"/g s/"cyg/"/g +s/"msys-/"/g diff -Nru cgreen-1.3.0/tests/text_reporter_tests.c cgreen-1.6.3/tests/text_reporter_tests.c --- cgreen-1.3.0/tests/text_reporter_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/text_reporter_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -37,7 +37,7 @@ static int mocked_vprinter(const char *format, va_list ap) { char buffer[10000]; - vsprintf(buffer, format, ap); + vsnprintf(buffer, sizeof(buffer), format, ap); output = concat(output, buffer); return strlen(output); diff -Nru cgreen-1.3.0/tests/unit_tests.c cgreen-1.6.3/tests/unit_tests.c --- cgreen-1.3.0/tests/unit_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/unit_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -26,37 +26,46 @@ unit_tests_teardown(); } +Ensure(Unittests, can_see_correct_version_marking) { + char version_string[20]; + + snprintf(version_string, sizeof(version_string), "%d.%d.%d", CGREEN_VERSION_MAJOR, CGREEN_VERSION_MINOR, CGREEN_VERSION_PATCH); + + assert_that(cgreen_library_version, is_equal_to_string(CGREEN_VERSION)); + assert_that(CGREEN_VERSION, is_equal_to_string(version_string)); +} + Ensure(Unittests, count_tests_return_zero_for_empty_suite) { - assert_that(count_tests(suite), is_equal_to(0)); + assert_that(count_tests(suite), is_equal_to(0)); } Ensure(Unittests, count_tests_return_one_for_suite_with_one_testcase) { - add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); - assert_that(count_tests(suite), is_equal_to(1)); + add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); + assert_that(count_tests(suite), is_equal_to(1)); } Ensure(Unittests, count_tests_return_four_for_four_nested_suite_with_one_testcase_each) { - TestSuite *suite2 = create_test_suite(); - TestSuite *suite3 = create_test_suite(); - TestSuite *suite4 = create_test_suite(); - add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); - 
add_suite(suite, suite2); - add_test_with_context(suite2, Unittests, count_tests_return_one_for_suite_with_one_testcase); - add_suite(suite2, suite3); - add_test_with_context(suite3, Unittests, count_tests_return_one_for_suite_with_one_testcase); - add_suite(suite3, suite4); - add_test_with_context(suite4, Unittests, count_tests_return_one_for_suite_with_one_testcase); - assert_that(count_tests(suite), is_equal_to(4)); + TestSuite *suite2 = create_test_suite(); + TestSuite *suite3 = create_test_suite(); + TestSuite *suite4 = create_test_suite(); + add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); + add_suite(suite, suite2); + add_test_with_context(suite2, Unittests, count_tests_return_one_for_suite_with_one_testcase); + add_suite(suite2, suite3); + add_test_with_context(suite3, Unittests, count_tests_return_one_for_suite_with_one_testcase); + add_suite(suite3, suite4); + add_test_with_context(suite4, Unittests, count_tests_return_one_for_suite_with_one_testcase); + assert_that(count_tests(suite), is_equal_to(4)); } TestSuite *unit_tests(void) { - TestSuite *suite = create_test_suite(); - set_setup(suite, unit_tests_setup); + TestSuite *suite = create_test_suite(); + set_setup(suite, unit_tests_setup); - add_test_with_context(suite, Unittests, count_tests_return_zero_for_empty_suite); - add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); - add_test_with_context(suite, Unittests, count_tests_return_four_for_four_nested_suite_with_one_testcase_each); + add_test_with_context(suite, Unittests, count_tests_return_zero_for_empty_suite); + add_test_with_context(suite, Unittests, count_tests_return_one_for_suite_with_one_testcase); + add_test_with_context(suite, Unittests, count_tests_return_four_for_four_nested_suite_with_one_testcase_each); - set_teardown(suite, unit_tests_teardown); - return suite; + set_teardown(suite, unit_tests_teardown); + return suite; } diff -Nru 
cgreen-1.3.0/tests/utils_tests.c cgreen-1.6.3/tests/utils_tests.c --- cgreen-1.3.0/tests/utils_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/utils_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -1,4 +1,5 @@ #include +#include #include "../src/utils.h" @@ -19,5 +20,5 @@ char buffer[100]; panic_set_output_buffer(buffer); PANIC(""); - assert_that(buffer, contains_string(__FILE__)); + assert_that(buffer, contains_string(FILENAME)); } diff -Nru cgreen-1.3.0/tests/xml_output_tests.c cgreen-1.6.3/tests/xml_output_tests.c --- cgreen-1.3.0/tests/xml_output_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/xml_output_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -1,4 +1,5 @@ #include +#include Ensure(failing_test_is_listed_by_xml_reporter) { assert_that(false); @@ -7,3 +8,14 @@ Ensure(passing_test_is_listed_by_xml_reporter) { assert_that(true); } + +Ensure(error_message_gets_escaped_by_xml_reporter) { + char *test_string = + "\n" + "\n" + " I'm the content & have chars which have to be escaped, " + "if put in outer XML.\n" + ""; + char *expected_string = "I'm not to be found!"; + assert_that(test_string, contains_string(expected_string)); +} diff -Nru cgreen-1.3.0/tests/xml_output_tests.expected cgreen-1.6.3/tests/xml_output_tests.expected --- cgreen-1.3.0/tests/xml_output_tests.expected 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/xml_output_tests.expected 2023-10-03 15:16:52.000000000 +0000 @@ -3,6 +3,16 @@ + + + + + diff -Nru cgreen-1.3.0/tests/xml_reporter_tests.c cgreen-1.6.3/tests/xml_reporter_tests.c --- cgreen-1.3.0/tests/xml_reporter_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tests/xml_reporter_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -45,7 +45,7 @@ char buffer[10000]; va_list ap; va_start(ap, format); - vsprintf(buffer, format, ap); + vsnprintf(buffer, sizeof(buffer), format, ap); va_end(ap); (void)file; diff -Nru cgreen-1.3.0/tools/cgreen_completion.bash 
cgreen-1.6.3/tools/cgreen_completion.bash --- cgreen-1.3.0/tools/cgreen_completion.bash 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen_completion.bash 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,151 @@ +# Contributed by Yavor Lulchev @RookieWookiee +# Improved by @thoni56 + +# Could not find a way to do these three in a general function (bash isn't my native language...) +_removeFromOptions() { + new_array=() + for value in "${options[@]}" + do + [[ $value != $1 ]] && new_array+=($value) + done + options=("${new_array[@]}") + unset new_array +} + +_removeFromLibraries() { + new_array=() + for value in "${libraries[@]}" + do + [[ $value != $1 ]] && new_array+=($value) + done + libraries=("${new_array[@]}") + unset new_array +} + +_removeFromTests() { + new_array=() + for value in "${tests[@]}" + do + [[ $value != $1 ]] && new_array+=($value) + done + tests=("${new_array[@]}") + unset new_array +} + +_discover_tests() +{ + if test -f $word ; then + local raw=$(nm -f posix $word | grep -o -E 'CgreenSpec\w*?\b') + local specs=( $(echo "$raw" | sed 's/^CgreenSpec__//g' | sed 's/__$//g') ) + # specs should be an array with all __ from the library in $word + local SUTs=( $(printf "%s\n" "${specs[@]}" | awk -F '__' '{ print $1 }' | sort | uniq) ) + # SUTs should now contain all SUTs present in the library + + for SUT in "${SUTs[@]}"; do + sut_tests=() + for spec in "${specs[@]}"; do + if [[ $spec == "$SUT"* ]] ; then + sut_tests+=( $spec ) + fi + done + if test $SUT = "default" ; then + # Tests have no SUT in its name + tests+=( ${sut_tests[@]/default__/} ) + else + tests+=( ${sut_tests[@]/__/:} ) + fi + done + fi +} + +case $OSTYPE in + darwin* ) + LIBEXT=dylib + ;; + linux* ) + LIBEXT=so + ;; + cygwin ) + LIBEXT=dll + ;; +esac + +_cgreen_runner_completion() +{ + local options libraries tests + options=("--colours" "--no-colours" "--xml" "--suite" "--verbose" "--no-run" "--help" " --version") + libraries=() + tests=() + + # Look for words in the 
command given so far + for word in ${COMP_WORDS[@]:1}; do + # Matching loadable libraries? + if compgen -G "$word*.$LIBEXT" > /dev/null; then + # Add that pattern + libraries+=($word*.$LIBEXT) + fi + # Is it a library? + if echo $word | grep -q -E "\b\.$LIBEXT\b"; then + _discover_tests + fi + done + + # Remove all suggestions already used + for word in ${COMP_WORDS[@]:1}; do + if [[ "$word" != "${COMP_WORDS[0]}" ]] ; then + _removeFromOptions $word + _removeFromLibraries $word + _removeFromTests $word + fi + done + + completion_word=${COMP_WORDS[$COMP_CWORD]} + expansions="${options[@]} ${libraries[@]} ${tests[@]}" + + COMPREPLY=($(compgen -W "$expansions" -- "$completion_word")) +} + +_cgreen_debug_completion() +{ + local options libraries tests + options=("--debugger") + libraries=() + tests=() + + # Look for words in the command given so far + for word in ${COMP_WORDS[@]}; do + # Matching loadable libraries? + if compgen -G "$word*.$LIBEXT" > /dev/null; then + # Add that pattern + libraries+=($word*.$LIBEXT) + fi + if echo $word | grep -q -E "\b\.$LIBEXT\b"; then + _discover_tests + fi + done + + # Remove libraries and tests if already used (only one library and one test allowed) + for word in ${COMP_WORDS[@]}; do + if [[ $word == *".$LIBEXT" ]]; then + # Only one library allowed + libraries=() + fi + if [[ $word == *"\\:"* ]]; then + if [[ "${COMP_WORDS[$COMP_CWORD]}" == "" ]]; then + # Only one test allowed + tests=() + fi + fi + if [[ "$word" != "${COMP_WORDS[0]}" ]] ; then + _removeFromOptions $word + fi + done + + completion_word=${COMP_WORDS[$COMP_CWORD]} + expansions="${options[@]} ${libraries[@]} ${tests[@]}" + + COMPREPLY=($(compgen -W "$expansions" -- "$completion_word")) +} + +complete -o nosort -o dirnames -A directory -F _cgreen_runner_completion cgreen-runner +complete -o nosort -A directory -F _cgreen_debug_completion cgreen-debug diff -Nru cgreen-1.3.0/tools/cgreen-debug cgreen-1.6.3/tools/cgreen-debug --- cgreen-1.3.0/tools/cgreen-debug 
2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen-debug 2023-10-03 15:16:52.000000000 +0000 @@ -1,4 +1,4 @@ -#!/usr/bin/bash +#!/bin/bash # cgreen-debug # # Script to start cgreen-runner under gdb, load a library and break @@ -21,94 +21,108 @@ # if you have argbash installed, or go to https://argbash.io and paste # this file into the online version. -# ARG_HELP([Start cgreen-runner under GDB and break at a specific test]) +# ARG_HELP([Start cgreen-runner under GDB (or other debugger) and break at a specific test]) +# ARG_OPTIONAL_SINGLE([debugger],[d],[The debugger to use],[cgdb]) # ARG_POSITIONAL_SINGLE([library],[Dynamically loadable library with Cgreen tests],[]) # ARG_POSITIONAL_SINGLE([testname],[The test to debug, in Cgreen notation (':')],[]) # ARGBASH_GO() # needed because of Argbash --> m4_ignore([ -### START OF CODE GENERATED BY Argbash v2.7.1 one line above ### +### START OF CODE GENERATED BY Argbash v2.9.0 one line above ### # Argbash is a bash code generator used to get arguments parsing right. 
# Argbash is FREE SOFTWARE, see https://argbash.io for more info +# Generated online by https://argbash.io/generate die() { - local _ret=$2 - test -n "$_ret" || _ret=1 - test "$_PRINT_HELP" = yes && print_help >&2 - echo "$1" >&2 - exit ${_ret} + local _ret="${2:-1}" + test "${_PRINT_HELP:-no}" = yes && print_help >&2 + echo "$1" >&2 + exit "${_ret}" } begins_with_short_option() { - local first_option all_short_options='h' - first_option="${1:0:1}" - test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0 + local first_option all_short_options='hd' + first_option="${1:0:1}" + test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0 } # THE DEFAULTS INITIALIZATION - POSITIONALS _positionals=() # THE DEFAULTS INITIALIZATION - OPTIONALS +_arg_debugger="cgdb" print_help() { - printf '%s\n' "Start cgreen-runner under GDB and break at a specific test" - printf 'Usage: %s [-h|--help] \n' "$0" - printf '\t%s\n' ": Dynamically loadable library with Cgreen tests" - printf '\t%s\n' ": The test to debug, in Cgreen notation (':')" - printf '\t%s\n' "-h, --help: Prints help" + printf '%s\n' "Start cgreen-runner under GDB (or other debugger) and break at a specific test" + printf 'Usage: %s [-h|--help] [-d|--debugger ] \n' "$0" + printf '\t%s\n' ": Dynamically loadable library with Cgreen tests" + printf '\t%s\n' ": The test to debug, in Cgreen notation (':')" + printf '\t%s\n' "-h, --help: Prints help" + printf '\t%s\n' "-d, --debugger: The debugger to use (default: 'cgdb')" } parse_commandline() { - _positionals_count=0 - while test $# -gt 0 - do - _key="$1" - case "$_key" in - -h|--help) - print_help - exit 0 - ;; - -h*) - print_help - exit 0 - ;; - *) - _last_positional="$1" - _positionals+=("$_last_positional") - _positionals_count=$((_positionals_count + 1)) - ;; - esac - shift - done + _positionals_count=0 + while test $# -gt 0 + do + _key="$1" + case "$_key" in + -h|--help) + print_help + exit 0 + ;; + 
-h*) + print_help + exit 0 + ;; + -d|--debugger) + test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 + _arg_debugger="$2" + shift + ;; + --debugger=*) + _arg_debugger="${_key##--debugger=}" + ;; + -d*) + _arg_debugger="${_key##-d}" + ;; + *) + _last_positional="$1" + _positionals+=("$_last_positional") + _positionals_count=$((_positionals_count + 1)) + ;; + esac + shift + done } handle_passed_args_count() { - local _required_args_string="'library' and 'testname'" - test "${_positionals_count}" -ge 2 || _PRINT_HELP=yes die "FATAL ERROR: Not enough positional arguments - we require exactly 2 (namely: $_required_args_string), but got only ${_positionals_count}." 1 - test "${_positionals_count}" -le 2 || _PRINT_HELP=yes die "FATAL ERROR: There were spurious positional arguments --- we expect exactly 2 (namely: $_required_args_string), but got ${_positionals_count} (the last one was: '${_last_positional}')." 1 + local _required_args_string="'library' and 'testname'" + test "${_positionals_count}" -ge 2 || _PRINT_HELP=yes die "FATAL ERROR: Not enough positional arguments - we require exactly 2 (namely: $_required_args_string), but got only ${_positionals_count}." 1 + test "${_positionals_count}" -le 2 || _PRINT_HELP=yes die "FATAL ERROR: There were spurious positional arguments --- we expect exactly 2 (namely: $_required_args_string), but got ${_positionals_count} (the last one was: '${_last_positional}')." 1 } assign_positional_args() { - local _positional_name _shift_for=$1 - _positional_names="_arg_library _arg_testname " + local _positional_name _shift_for=$1 + _positional_names="_arg_library _arg_testname " - shift "$_shift_for" - for _positional_name in ${_positional_names} - do - test $# -gt 0 || break - eval "$_positional_name=\${1}" || die "Error during argument parsing, possibly an Argbash bug." 
1 - shift - done + shift "$_shift_for" + for _positional_name in ${_positional_names} + do + test $# -gt 0 || break + eval "$_positional_name=\${1}" || die "Error during argument parsing, possibly an Argbash bug." 1 + shift + done } parse_commandline "$@" @@ -120,16 +134,34 @@ ### END OF CODE GENERATED BY Argbash (sortof) ### ]) # [ <-- needed because of Argbash +if [ "$_arg_debugger" == "" ]; then + if command -v cgdb > /dev/null 2>&1 ; then + debugger=cgdb + else + debugger=gdb + fi +else + if command -v $_arg_debugger > /dev/null 2>&1 ; then + debugger=$_arg_debugger + else + echo "No such debugger: $_arg_debugger" + exit 1 + fi +fi -bp=${2//:/__} -echo break $bp > .cgreen-debug-commands -echo run $1 $2 >> .cgreen-debug-commands -if command -v cgdb ; then - debugger=cgdb +# Figure out where to place breakpoint by replacing ':' with '__' +bp=${_arg_testname//:/__} + +if [ "$debugger" == "lldb" ] ; then + echo break set -n $bp > .cgreen-debug-commands + echo run $_arg_library $_arg_testname >> .cgreen-debug-commands + $debugger cgreen-runner --source .cgreen-debug-commands else - debugger=gdb + echo break $bp > .cgreen-debug-commands + echo run $_arg_library $_arg_testname >> .cgreen-debug-commands + $debugger -ex "set breakpoint pending on" cgreen-runner --command=.cgreen-debug-commands fi -$debugger -ex "set breakpoint pending on" cgreen-runner --command=.cgreen-debug-commands -rm .cgreen-debug-commands + +#rm .cgreen-debug-commands # ] <-- needed because of Argbash diff -Nru cgreen-1.3.0/tools/cgreen-debug.argbash cgreen-1.6.3/tools/cgreen-debug.argbash --- cgreen-1.3.0/tools/cgreen-debug.argbash 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen-debug.argbash 2023-10-03 15:16:52.000000000 +0000 @@ -1,4 +1,4 @@ -#!/usr/bin/bash +#!/bin/bash # cgreen-debug # # Script to start cgreen-runner under gdb, load a library and break @@ -21,22 +21,42 @@ # if you have argbash installed, or go to https://argbash.io and paste # this file into the online 
version. -# ARG_HELP([Start cgreen-runner under GDB and break at a specific test]) +# ARG_HELP([Start cgreen-runner under GDB (or other debugger) and break at a specific test]) +# ARG_OPTIONAL_SINGLE([debugger], d, [The debugger to use], [cgdb]) # ARG_POSITIONAL_SINGLE([library], [Dynamically loadable library with Cgreen tests], ) # ARG_POSITIONAL_SINGLE([testname], [The test to debug, in Cgreen notation (':')], ) # ARGBASH_GO # [ <-- needed because of Argbash -bp=${2//:/__} -echo break $bp > .cgreen-debug-commands -echo run $1 $2 >> .cgreen-debug-commands -if command -v cgdb ; then - debugger=cgdb +if [ "$_arg_debugger" == "" ]; then + if command -v cgdb > /dev/null 2>&1 ; then + debugger=cgdb + else + debugger=gdb + fi else - debugger=gdb + if command -v $_arg_debugger > /dev/null 2>&1 ; then + debugger=$_arg_debugger + else + echo "No such debugger: $_arg_debugger" + exit 1 + fi fi -$debugger -ex "set breakpoint pending on" cgreen-runner --command=.cgreen-debug-commands + +# Figure out where to place breakpoint by replacing ':' with '__' +bp=${_arg_testname//:/__} + +if [ "$debugger" == "lldb" ] ; then + echo break set -n $bp > .cgreen-debug-commands + echo run $_arg_library $_arg_testname >> .cgreen-debug-commands + $debugger cgreen-runner --source .cgreen-debug-commands +else + echo break $bp > .cgreen-debug-commands + echo run $_arg_library $_arg_testname >> .cgreen-debug-commands + $debugger -ex "set breakpoint pending on" cgreen-runner --command=.cgreen-debug-commands +fi + rm .cgreen-debug-commands # ] <-- needed because of Argbash diff -Nru cgreen-1.3.0/tools/cgreen_libxml_output_diff cgreen-1.6.3/tools/cgreen_libxml_output_diff --- cgreen-1.3.0/tools/cgreen_libxml_output_diff 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen_libxml_output_diff 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,118 @@ +#!/bin/bash +# +# Will run the cgreen-runner in directory ../tools +# on library with name $1 (e.g. 
'lib${name}.so') +# generating XML output that will be compared to +# sources are in $2 for... +# ...expected output file name in $3 +# ...and commands to normalize output (in the file normalize_{name}.sed) +# +# Determine OS +unameo=`uname -o 1>/dev/null 2>/dev/null; echo $?` +if [ $unameo -eq 0 ]; then + OS=`uname -o` +else + OS=`uname -s` +fi + +# Set up library prefix and extension +case "$OS" in +Darwin) + prefix=lib + extension=dylib + ;; +GNU/Linux|FreeBSD) + prefix=lib + extension=so + ;; +Cygwin) + prefix=cyg + extension=dll + ;; +Msys) + prefix=lib + extension=dll + ;; +*) + echo "ERROR: $0 can't handle OS=$OS" + exit 2 + ;; +esac + +# Handle arguments +if [ $# -ne 3 ]; then + echo "ERROR: $0 requires exactly 3 arguments" + exit 2 +fi + +# TODO: don't shift +name=$1; shift 1 + +sourcedir=$1 ; shift 1 +sourcedir=$(perl -e 'use Cwd "abs_path"; print abs_path(@ARGV[0])' -- "$sourcedir") +if [ $(uname -n) = thoni64 ]; then + # On my Cygwin machine I have linked /home to c:/Users so absolute paths don't match + # what runner prints, so try to replace that (don't know how to generalize this...) + sourcedir=`echo $sourcedir | sed -e s#/cygdrive/c/Users#/home#` +fi + +# TODO: don't shift +expected=$1 ; shift 1 + +commandfile="${sourcedir}/normalize_${name}.sed" + +# Do it! +if [ -z "$CGREEN_PER_TEST_TIMEOUT" ]; then + printf "Comparing output of ${name} to expected: " +else + printf "Comparing output of ${name} to expected with CGREEN_PER_TEST_TIMEOUT=$CGREEN_PER_TEST_TIMEOUT: " +fi + +# Run runner on library store output and error +../tools/cgreen-runner -X L "${prefix}${name}.${extension}" > "${name}.output" 2> "${name}.error" +cat "${name}.error" >> "${name}.output" +cat L-*${name}.xml L-*${name}-default.xml >> "${name}.output" + +tempfile=`mktemp` + +# sed commands to normalize... 
+# - line numbers in error messages +echo "s/:[0-9]+:/:/g" > $tempfile + +# - timing info +echo "s/in [0-9]+ms\./in 0ms\./g" >> $tempfile + +# - library prefix +# TODO: should use prefix, shouldn't it? +echo s/\".*${name}\"/\"${name}\"/g >> $tempfile + +# - source path, ensure parenthesis are not interpreted by sed -E +# but allow any characters before sourcedir in the string +echo s%\".*${sourcedir//[\(\)]/.}/%\"%g >> $tempfile + +# Do normalization using the commands in the tempfile and the specified commandfile +sed -E -f "${tempfile}" -f "${commandfile}" "${name}.output" > "${name}.output.normalized" + +# Check for color capability +if test -t 1; then + ncolors=$(tput colors) + + if test -n "$ncolors" && test $ncolors -ge 8; then + green="$(tput setaf 2)" + normal="$(tput sgr0)" + fi +fi + +# Compare normalized output to expected +cmp -s "${name}.output.normalized" "${sourcedir}/${expected}" + +# If not the same, show diff +rc=$? +if [ $rc -ne 0 ] +then + echo + diff -c "${name}.output.normalized" "${sourcedir}/${expected}" +else + echo ${green}Ok${normal} +fi +exit $rc diff -Nru cgreen-1.3.0/tools/cgreen-runner.c cgreen-1.6.3/tools/cgreen-runner.c --- cgreen-1.3.0/tools/cgreen-runner.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen-runner.c 2023-10-03 15:16:52.000000000 +0000 @@ -32,16 +32,28 @@ /*----------------------------------------------------------------------*/ static void usage(const char *program_name) { printf("cgreen-runner for Cgreen unittest and mocking framework v%s\n\n", VERSION); - printf("Usage:\n %s [--xml ] [--suite ] [--verbose] [--quiet] [--no-run] [--help] ( [])+\n\n", program_name); + printf("Usage:\n %s [--suite ] [--verbose] [--quiet] [--no-run] " +#if HAVE_XML_REPORTER + "[--xml ] " +#endif +#if HAVE_LIBXML2_REPORTER + "[--libxml2 ] " +#endif + "[--help] ( [])+\n\n", program_name); printf("Discover and run all or named cgreen test(s) from one or multiple\n"); printf("dynamically loadable libraries.\n\n"); printf("A 
single test can be run using the form [:] where can\n"); printf("be omitted if there is no context.\n\n"); printf(" -c --colours/colors\t\tUse colours to emphasis result (requires ANSI-capable terminal)\n"); printf(" -C --no-colours/no-colors\tDon't use colours\n"); +#if HAVE_XML_REPORTER printf(" -x --xml \t\tInstead of messages on stdout, write results into one XML-file\n"); printf("\t\t\t\tper suite, compatible with Hudson/Jenkins CI. The filename(s)\n"); printf("\t\t\t\twill be '-.xml'\n"); +#endif +#if HAVE_LIBXML2_REPORTER + printf(" -X --libxml2 \t\tFormat the test results using libxml2\n"); +#endif printf(" -s --suite \t\tName the top level suite\n"); printf(" -n --no-run\t\t\tDon't run the tests\n"); printf(" -v --verbose\t\t\tShow progress information\n"); @@ -55,6 +67,7 @@ enum option_arg { HELP_OPT, XML_OPT, + LIBXML2_OPT, SUITE_OPT, VERBOSE_OPT, QUITE_OPT, @@ -74,9 +87,18 @@ .flags = GOPT_ARGUMENT_FORBIDDEN, }, [XML_OPT] = { +#if HAVE_XML_REPORTER .short_name = 'x', .long_name = "xml", .flags = GOPT_ARGUMENT_REQUIRED, +#endif + }, + [LIBXML2_OPT] = { +#if HAVE_LIBXML2_REPORTER + .short_name = 'X', + .long_name = "libxml2", +#endif + .flags = GOPT_ARGUMENT_REQUIRED, }, [SUITE_OPT] = { .short_name = 's', @@ -90,7 +112,7 @@ }, [QUITE_OPT] = { .short_name = 'q', - .long_name = "quite", + .long_name = "quiet", .flags = GOPT_ARGUMENT_FORBIDDEN, }, [VERSION_OPT] = { @@ -154,7 +176,11 @@ /*----------------------------------------------------------------------*/ static bool have_xml_option(void) { - return options[XML_OPT].count > 0; + return options[XML_OPT].count > 0 || options[LIBXML2_OPT].count > 0; +} + +static bool have_libxml2_option(void) { + return options[LIBXML2_OPT].count > 0; } /*----------------------------------------------------------------------*/ @@ -216,9 +242,21 @@ argc = gopt(argv, options); gopt_errors(argv[0], options); - if (options[XML_OPT].count) + if (have_libxml2_option()) { +#if HAVE_LIBXML2_REPORTER + reporter = 
create_libxml_reporter(options[LIBXML2_OPT].argument); +#else + printf("libxml2 reporter not available\n"); + return EXIT_FAILURE; +#endif + } else if (have_xml_option()) { +#if HAVE_XML_REPORTER reporter = create_xml_reporter(options[XML_OPT].argument); - else +#else + printf("XML reporter not available\n"); + return EXIT_FAILURE; +#endif + } else reporter = create_text_reporter(); suite_name_option = options[SUITE_OPT].argument; @@ -281,12 +319,13 @@ reporter_options.inhibit_finish_suite_message = false; int test_count = 0; - if (!have_xml_option() && suite_name_option != NULL) { + if (!have_xml_option() && (suite_name_option != NULL)) { /* Count all tests */ for (i = 0; i/dev/null 2>/dev/null; echo $?` if [ $unameo -eq 0 ]; then @@ -32,7 +34,7 @@ extension=dll ;; Msys) - prefix=lib + prefix=msys- extension=dll ;; *) @@ -54,7 +56,8 @@ sourcedir=$(perl -e 'use Cwd "abs_path"; print abs_path(@ARGV[0])' -- "$sourcedir") if [ $(uname -n) = thoni64 ]; then # On my Cygwin machine I have linked /home to c:/Users so absolute paths don't match - # what runner prints, so try to replace that (don't know how to generalize this...) + # what runner prints, so try to replace that + # TODO: generalize this... sourcedir=`echo $sourcedir | sed -e s#/cygdrive/c/Users#/home#` fi @@ -76,16 +79,16 @@ tempfile=`mktemp` + # sed commands to normalize... # - line numbers in error messages -echo "s/:[0-9]+:/:/g" > $tempfile +echo "s/:[0-9]+:/:000:/g" > $tempfile # - timing info echo "s/in [0-9]+ms\./in 0ms\./g" >> $tempfile # - library prefix -# TODO: should use prefix, shouldn't it? 
-echo s/\".*${name}\"/\"${name}\"/g >> $tempfile +echo s/\"${prefix}${name}\"/\"${name}\"/g >> $tempfile # - source path, ensure parenthesis are not interpreted by sed -E echo s%.*${sourcedir//[\(\)]/.}/%%g >> $tempfile diff -Nru cgreen-1.3.0/tools/cgreen_xml_output_diff cgreen-1.6.3/tools/cgreen_xml_output_diff --- cgreen-1.3.0/tools/cgreen_xml_output_diff 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/cgreen_xml_output_diff 2023-10-03 15:16:52.000000000 +0000 @@ -7,6 +7,11 @@ # ...expected output file name in $3 # ...and commands to normalize output (in the file normalize_{name}.sed) # +# TODO: refactor the duplication in this and cgreen_xml_output_diff to something +# not having the duplication, common sub-script, maybe? +# +# TODO: make 'prefix' and 'extension' into arguments instead of evaluating them here + # Determine OS unameo=`uname -o 1>/dev/null 2>/dev/null; echo $?` if [ $unameo -eq 0 ]; then @@ -30,7 +35,7 @@ extension=dll ;; Msys) - prefix=lib + prefix=msys- extension=dll ;; *) @@ -52,7 +57,8 @@ sourcedir=$(perl -e 'use Cwd "abs_path"; print abs_path(@ARGV[0])' -- "$sourcedir") if [ $(uname -n) = thoni64 ]; then # On my Cygwin machine I have linked /home to c:/Users so absolute paths don't match - # what runner prints, so try to replace that (don't know how to generalize this...) + # what runner prints, so try to replace that + # TODO: generalize this... sourcedir=`echo $sourcedir | sed -e s#/cygdrive/c/Users#/home#` fi @@ -83,8 +89,7 @@ echo "s/in [0-9]+ms\./in 0ms\./g" >> $tempfile # - library prefix -# TODO: should use prefix, shouldn't it? 
-echo s/\".*${name}\"/\"${name}\"/g >> $tempfile +echo s/\"${prefix}${name}\"/\"${name}\"/g >> $tempfile # - source path, ensure parenthesis are not interpreted by sed -E # but allow any characters before sourcedir in the string diff -Nru cgreen-1.3.0/tools/CMakeLists.txt cgreen-1.6.3/tools/CMakeLists.txt --- cgreen-1.3.0/tools/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/CMakeLists.txt 2023-10-03 15:16:52.000000000 +0000 @@ -1,18 +1,111 @@ -include_directories(${CGREEN_PUBLIC_INCLUDE_DIRS} ${PROJECT_BINARY_DIR}) +include_directories(${CGREEN_PUBLIC_INCLUDE_DIRS} ${PROJECT_BINARY_DIR} ${CURRENT_BINARY_DIR}) set(RUNNER_SRCS - cgreen-runner.c gopt.c gopt-errors.c runner.c discoverer.c test_item.c io.c) + ${CMAKE_CURRENT_SOURCE_DIR}/cgreen-runner.c + ${CMAKE_CURRENT_SOURCE_DIR}/gopt.c + ${CMAKE_CURRENT_SOURCE_DIR}/gopt-errors.c + ${CMAKE_CURRENT_SOURCE_DIR}/runner.c + ${CMAKE_CURRENT_SOURCE_DIR}/discoverer.c + ${CMAKE_CURRENT_SOURCE_DIR}/test_item.c + ${CMAKE_CURRENT_SOURCE_DIR}/io.c +) set_source_files_properties(${RUNNER_SRCS} PROPERTIES LANGUAGE C) -add_executable(cgreen-runner ${RUNNER_SRCS}) -target_link_libraries(cgreen-runner ${CGREEN_SHARED_LIBRARY} ${CMAKE_DL_LIBS}) +# Do we have an "nm"? 
+include(FindNm) -install(TARGETS cgreen-runner - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - DESTINATION ${CMAKE_INSTALL_BINDIR} -) +if (NM_FOUND) + set_source_files_properties(discoverer.c PROPERTIES COMPILE_FLAGS -DNM_EXECUTABLE='\"${NM_EXECUTABLE}\"') + + include(DefineRelativeFilePaths) + cmake_define_relative_file_paths("${RUNNER_SRCS}") + + add_executable(cgreen-runner ${RUNNER_SRCS}) + target_link_libraries(cgreen-runner ${CGREEN_LIBRARY} $<$:${LIBXML2_LIBRARIES}> ${CMAKE_DL_LIBS}) + + install(TARGETS cgreen-runner + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + DESTINATION ${CMAKE_INSTALL_BINDIR} + ) + + install(FILES cgreen-debug + DESTINATION ${CMAKE_INSTALL_BINDIR}) + + set(CGREEN_RUNNER_TESTS_LIBRARY + cgreen_runner_tests + CACHE INTERNAL "cgreen-runner tests shared library" + ) + set(RUNNER_TESTS_SRCS + runner_unit_tests.c + test_item.c + ) + add_library(${CGREEN_RUNNER_TESTS_LIBRARY} SHARED ${RUNNER_TESTS_SRCS}) + target_link_libraries(${CGREEN_RUNNER_TESTS_LIBRARY} ${CGREEN_LIBRARY}) + + # Due to some (of many) CMake irregularities to reference the test libraries + # we can't just use its CMake name variable, but have to look it up with + # some special attributes of the library: + # $/$ + # + SET(CGREEN_RUNNER_TESTS_LIBRARY "$/$") + + macro_add_test(NAME cgreen_runner_unit_tests + COMMAND cgreen-runner ${CGREEN_RUNNER_TESTS_LIBRARY}) + + macro_add_test(NAME cgreen_runner_usage + COMMAND cgreen-runner --help) + + macro_add_test(NAME cgreen_runner_quiet + COMMAND cgreen-runner -q ${CGREEN_RUNNER_TESTS_LIBRARY}) + + macro_add_test(NAME cgreen_runner_verbose + COMMAND cgreen-runner -v -C ${CGREEN_RUNNER_TESTS_LIBRARY}) + + macro_add_test(NAME cgreen_runner_version + COMMAND cgreen-runner --version) + + macro_add_test(NAME cgreen_runner_single_explicit_named_test + 
COMMAND cgreen-runner $/$ Runner:can_match_test_name) + + macro_add_test(NAME cgreen_runner_patternmatched_testnames + COMMAND cgreen-runner $/$ Runner:can*) + + macro_add_test(NAME cgreen_runner_suite_name + COMMAND cgreen-runner -s Suite ${CGREEN_RUNNER_TESTS_LIBRARY}) + + macro_add_test(NAME cgreen_runner_fail_on_non_existing_library + COMMAND cgreen-runner Suite non_existent_library) + set_tests_properties(cgreen_runner_fail_on_non_existing_library PROPERTIES WILL_FAIL true) + + macro_add_test(NAME cgreen_runner_fail_on_non_existing_library_with_suite + COMMAND cgreen-runner -s Suite non_existent_library + WILL_FAIL) + set_tests_properties(cgreen_runner_fail_on_non_existing_library_with_suite PROPERTIES WILL_FAIL true) + + macro_add_test(NAME cgreen_runner_patternmatched_testnames_in_patternmatched_context + COMMAND cgreen-runner $/$ Run*:can*) + + macro_add_test(NAME cgreen_runner_wildcarded_tests_in_named_context + COMMAND cgreen-runner $/$ Runner:*) + + macro_add_test(NAME cgreen_runner_wildcarded_tests_in_wildcarded_context + COMMAND cgreen-runner $/$ *:*) + + if (CGREEN_WITH_XML) + macro_add_test(NAME cgreen_runner_with_xml_reporter + COMMAND cgreen-runner --xml TEST --suite cgreen_runner_tests $/$) + endif (CGREEN_WITH_XML) + + if (CGREEN_WITH_LIBXML2) + macro_add_test(NAME cgreen_runner_with_libxml2_reporter + COMMAND cgreen-runner --libxml2 TEST --suite cgreen_runner_tests $/$) + endif (CGREEN_WITH_LIBXML2) + + macro_add_test(NAME cgreen_runner_multiple_libraries + COMMAND cgreen-runner ${CGREEN_RUNNER_TESTS_LIBRARY} ${CGREEN_RUNNER_TESTS_LIBRARY} ${CGREEN_RUNNER_TESTS_LIBRARY}) -install(PROGRAMS cgreen-debug - DESTINATION ${CMAKE_INSTALL_BINDIR}) +else() + message("No 'nm' found on this system. 
'cgreen-runner' will not be built.") +endif() diff -Nru cgreen-1.3.0/tools/discoverer.c cgreen-1.6.3/tools/discoverer.c --- cgreen-1.3.0/tools/discoverer.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/discoverer.c 2023-10-03 15:16:52.000000000 +0000 @@ -60,7 +60,7 @@ close_file(library); char nm_command[1000]; - sprintf(nm_command, "/usr/bin/nm '%s'", filename); + sprintf(nm_command, "%s '%s' 2>&1", NM_EXECUTABLE, filename); FILE *nm_output_pipe = open_process(nm_command, "r"); if (nm_output_pipe == NULL) return NULL; @@ -68,5 +68,6 @@ CgreenVector *tests = create_cgreen_vector((GenericDestructor)&destroy_test_item); add_all_tests_from(nm_output_pipe, tests, verbose); close_process(nm_output_pipe); + return tests; } diff -Nru cgreen-1.3.0/tools/discoverer.mock cgreen-1.6.3/tools/discoverer.mock --- cgreen-1.3.0/tools/discoverer.mock 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/discoverer.mock 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,14 @@ +#include "cgreen/mocks.h" + +#include "discoverer.h" + +// We want to run our tests with cgreen, of course, so in order to not +// cause conflicts with the function in the library itself we rename +// it here when we are running unittests. Also see Makefile. 
+#ifdef UNITTESTING +#define discover_tests_in(x,y) unittesting_discover_tests_in(x,y) +#endif + +CgreenVector *discover_tests_in(const char *filename, bool verbose) { + return (CgreenVector *)mock(filename, verbose); +} diff -Nru cgreen-1.3.0/tools/discoverer_unit_tests.c cgreen-1.6.3/tools/discoverer_unit_tests.c --- cgreen-1.3.0/tools/discoverer_unit_tests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/discoverer_unit_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -55,7 +55,7 @@ static char command[100]; expect_open_file(filename, (void *)1); expect(close_file, when(file, is_equal_to(1))); - sprintf(command, "nm %s", filename); + sprintf(command, "nm '%s'", filename); expect_open_process(command, (void *)2); expect_read_line_from(2, line1); expect_read_line_from(2, line2); diff -Nru cgreen-1.3.0/tools/.gitignore cgreen-1.6.3/tools/.gitignore --- cgreen-1.3.0/tools/.gitignore 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/.gitignore 2023-10-03 15:16:52.000000000 +0000 @@ -1,3 +1,7 @@ *.o *.d lib*.so +discoverer_unit_test_runner.c +discoverer_unit_tests +runner_unit_test_runner.c +runner_unit_tests \ No newline at end of file diff -Nru cgreen-1.3.0/tools/gopt.c cgreen-1.6.3/tools/gopt.c --- cgreen-1.3.0/tools/gopt.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/gopt.c 2023-10-03 15:16:52.000000000 +0000 @@ -81,7 +81,7 @@ unsigned int operand_count = 1; unsigned int doubledash = 0; unsigned int expecting = 0; - unsigned int option_index; + unsigned int option_index = 0; unsigned int i, j; for (i = 0; !(options[i].flags & GOPT_LAST); i++) diff -Nru cgreen-1.3.0/tools/io.mock cgreen-1.6.3/tools/io.mock --- cgreen-1.3.0/tools/io.mock 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/io.mock 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,22 @@ +#include "io.h" + +FILE *open_file(const char *filename, const char *mode) { + return (FILE *) mock(filename, mode); +} + +int close_file(FILE *file) { + return (int) mock(file); 
+} + +FILE *open_process(const char *command, const char *mode) { + return (FILE *) mock(command, mode); +} + +int close_process(FILE *file) { + return (int) mock(file); +} + +int read_line(FILE *file, char *buffer, int max_length) { + return (int) mock(file, buffer, max_length); +} + diff -Nru cgreen-1.3.0/tools/io.mocks cgreen-1.6.3/tools/io.mocks --- cgreen-1.3.0/tools/io.mocks 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/io.mocks 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -#include "io.h" - -FILE *open_file(const char *filename, const char *mode) { - return (FILE *) mock(filename, mode); -} - -int close_file(FILE *file) { - return (int) mock(file); -} - -FILE *open_process(const char *command, const char *mode) { - return (FILE *) mock(command, mode); -} - -int close_process(FILE *file) { - return (int) mock(file); -} - -int read_line(FILE *file, char *buffer, int max_length) { - return (int) mock(file, buffer, max_length); -} - diff -Nru cgreen-1.3.0/tools/Makefile cgreen-1.6.3/tools/Makefile --- cgreen-1.3.0/tools/Makefile 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/Makefile 2023-10-03 15:16:52.000000000 +0000 @@ -6,7 +6,7 @@ # external symbols in the modules have other names when unittesting # them using macro magic. The UNITTESTING preprocessor symbol will # add the suffix '_unittesting' to externally, clashing symbols so -# that we can separate the once in the cgreen-runner that is running +# that we can separate the ones in the cgreen-runner that is running # the tests, and the ones in the units being tested. UNAMEOEXISTS=$(shell uname -o 1>&2 2>/dev/null; echo $$?) @@ -16,8 +16,7 @@ OS=$(shell uname -s) endif -COMMONFLAGS = -g -CFLAGS = $(COMMONFLAGS) -Wall -Wextra -I../include -MMD -DUNITTESTING +CFLAGS = -Wall -g -I../include -MMD -DUNITTESTING -I. ifneq ($(OS),Cygwin) CFLAGS+=-fPIC endif @@ -41,24 +40,51 @@ all: make --no-print-directory -C .. 
-# Use this if you want quick feed back in this directory +# Use this if you want quick feed-back in this directory tests: unit_tests acceptance_tests #---------------------------------------------------------------------- -main: main.o discoverer_acceptance_tests.o discoverer.o test_item.o io.o utils.o - $(CC) $(LDFLAGS) -o $@ $^ $(LIBRARIES) - -utils.o: ../src/utils.c +unit unit_tests: discoverer_unit_tests runner_unit_tests + LD_LIBRARY_PATH=. ./discoverer_unit_tests + LD_LIBRARY_PATH=. ./runner_unit_tests + +discoverer.o: CFLAGS += -DNM_EXECUTABLE='"nm"' + +discoverer_unit_tests : discoverer_unit_tests.so + $(CC) $(LDFLAGS) -o $@ $^ + +discoverer_unit_tests.so : discoverer_unit_test_runner.o discoverer.o test_item.o + $(CC) $(LDFLAGS) -shared -o $@ $^ $(LIBRARIES) + +# Auto-generate a runner for discoverer unittests +discoverer_unit_test_runner.c : discoverer_unit_tests.c + echo "#include \"discoverer_unit_tests.c\"" > discoverer_unit_test_runner.c + echo "TestSuite *discoverer_unit_tests() {" >> discoverer_unit_test_runner.c + echo " TestSuite *suite = create_test_suite();" >> discoverer_unit_test_runner.c + grep Ensure discoverer_unit_tests.c | sed -e 's/Ensure(/ add_test_with_context(suite, /g' -e 's/) {/);/g' >> discoverer_unit_test_runner.c + echo " return suite;" >> discoverer_unit_test_runner.c + echo "}" >> discoverer_unit_test_runner.c + echo "int main(int argc, char **argv) {" >> discoverer_unit_test_runner.c + echo " return run_test_suite(discoverer_unit_tests(), create_text_reporter());" >> discoverer_unit_test_runner.c + echo "}" >> discoverer_unit_test_runner.c -#---------------------------------------------------------------------- -unit_tests: libdiscoverer_unit_tests.so - $(CGREEN_RUNNER) $^ +runner_unit_tests: runner_unit_tests.so + $(CC) $(LDFLAGS) -o $@ $^ $(LIBRARIES) -libdiscoverer_unit_tests.so: discoverer_unit_tests.o discoverer.o test_item.o - $(CC) -shared -o $@ $^ $(LIBRARIES) +runner_unit_tests.so: runner_unit_test_runner.o 
test_item.o + $(CC) $(LDFLAGS) -shared -o $@ $^ -ldl $(LIBRARIES) -io.mocks : io.h - ../contrib/cgreen-mocker/cgreen-mocker.py -I../../pycparser-master/utils/fake_libc_include io.h > io.mocks +# Auto-generate a runner for runner unittests +runner_unit_test_runner.c : runner_unit_tests.c + echo "#include \"runner_unit_tests.c\"" > runner_unit_test_runner.c + echo "TestSuite *runner_unit_tests() {" >> runner_unit_test_runner.c + echo " TestSuite *suite = create_test_suite();" >> runner_unit_test_runner.c + grep Ensure runner_unit_tests.c | sed -e 's/Ensure(/ add_test_with_context(suite, /g' -e 's/) {/);/g' >> runner_unit_test_runner.c + echo " return suite;" >> runner_unit_test_runner.c + echo "}" >> runner_unit_test_runner.c + echo "int main(int argc, char **argv) {" >> runner_unit_test_runner.c + echo " return run_test_suite(runner_unit_tests(), create_text_reporter());" >> runner_unit_test_runner.c + echo "}" >> runner_unit_test_runner.c #---------------------------------------------------------------------- acceptance_tests: libdiscoverer_acceptance_tests.so @@ -69,6 +95,6 @@ #---------------------------------------------------------------------- clean: - -rm *.o *.so + -rm *.o *.so *.dll -include *.d diff -Nru cgreen-1.3.0/tools/runner.c cgreen-1.6.3/tools/runner.c --- cgreen-1.3.0/tools/runner.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/runner.c 2023-10-03 15:16:52.000000000 +0000 @@ -10,7 +10,7 @@ #include #include -#include "utils.h" +#include "../src/utils.h" #include "runner.h" #include "test_item.h" diff -Nru cgreen-1.3.0/tools/runner_unit_test_runner.c cgreen-1.6.3/tools/runner_unit_test_runner.c --- cgreen-1.3.0/tools/runner_unit_test_runner.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/runner_unit_test_runner.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,18 @@ +#include "runner_unit_tests.c" +TestSuite *runner_unit_tests() { + TestSuite *suite = create_test_suite(); + add_test_with_context(suite, Runner, 
can_get_context_name_of_name); + add_test_with_context(suite, Runner, can_get_test_name_of_symbolic_name); + add_test_with_context(suite, Runner, can_ensure_test_exists_from_context_and_name); + add_test_with_context(suite, Runner, can_match_test_name); + add_test_with_context(suite, Runner, can_add_test_to_the_suite_for_its_context); + add_test_with_context(suite, Runner, can_sort_an_empty_list_of_tests); + add_test_with_context(suite, Runner, can_sort_a_list_of_a_single_tests); + add_test_with_context(suite, Runner, can_sort_a_list_of_two_unordered_tests); + add_test_with_context(suite, Runner, can_sort_an_ordered_list_of_two_tests); + add_test_with_context(suite, Runner, can_sort_an_unordered_list_of_tests); + return suite; +} +int main(int argc, char **argv) { + return run_test_suite(runner_unit_tests(), create_text_reporter()); +} diff -Nru cgreen-1.3.0/tools/runner_unit_tests.c cgreen-1.6.3/tools/runner_unit_tests.c --- cgreen-1.3.0/tools/runner_unit_tests.c 1970-01-01 00:00:00.000000000 +0000 +++ cgreen-1.6.3/tools/runner_unit_tests.c 2023-10-03 15:16:52.000000000 +0000 @@ -0,0 +1,196 @@ +#include + +#include "../src/utils.h" + +#ifdef __cplusplus +using namespace cgreen; +#endif + +#include "runner.c" + +#include "test_item.h" + +#include "discoverer.mock" +#include "io.mock" + + +Describe(Runner); + +BeforeEach(Runner){} + +AfterEach(Runner){} + +#define CONTEXT_NAME "context" +#define TEST_NAME "test" + +#define STRINGIFY_X(x) #x +#define STRINGIFY(x) STRINGIFY_X(x) + +Ensure(Runner, can_get_context_name_of_name) { + char *context_name = context_name_of("Context:Test"); + assert_that(context_name, is_equal_to_string("Context")); + free(context_name); + + context_name = context_name_of("Test"); + assert_that(context_name, is_equal_to_string(CGREEN_DEFAULT_SUITE)); + free(context_name); +} + + +Ensure(Runner, can_get_test_name_of_symbolic_name) { + char *test_name = test_name_of("Context:Test"); + assert_that(test_name, is_equal_to_string("Test")); + 
free(test_name); + + test_name = test_name_of("Test"); + assert_that(test_name, is_equal_to_string("Test")); + free(test_name); +} + + +static void add_test_items_to_vector(TestItem items[], CgreenVector *test_items, int count) { + for (int i=0; i < count; i++) + cgreen_vector_add(test_items, &items[i]); +} + + +Ensure(Runner, can_ensure_test_exists_from_context_and_name) { + TestItem test_items[5] = { + {(char *)"", (char *)"Context1", (char *)"Test1"}, + {(char *)"", (char *)"Context1", (char *)"Test2"}, + {(char *)"", (char *)"Context2", (char *)"Test1"}, + {(char *)"", (char *)"Context2", (char *)"Test2"}}; + CgreenVector *tests = create_cgreen_vector(NULL); + add_test_items_to_vector(test_items, tests, 5); + + assert_that(matching_test_exists("Context1:Test1", tests)); +} + +Ensure(Runner, can_match_test_name) { + TestItem test_item = {(char *)"", (char *)"Context1", (char *)"Test1"}; + + assert_that(test_matches_pattern("Context1:Test1", &test_item)); + assert_that(test_matches_pattern("Context*:Test1", &test_item)); + assert_that(test_matches_pattern("*:Test1", &test_item)); + + assert_that(test_matches_pattern("Context*:Test1", &test_item)); + assert_that(test_matches_pattern("*:Test1", &test_item)); + + assert_that(test_matches_pattern("Context1:Test*", &test_item)); + assert_that(test_matches_pattern("Context*:Test*", &test_item)); + assert_that(test_matches_pattern("Context*:*", &test_item)); + assert_that(test_matches_pattern("*:Test*", &test_item)); + assert_that(test_matches_pattern("*:*", &test_item)); +} + +Ensure(Runner, can_add_test_to_the_suite_for_its_context) { + ContextSuite *suite_list = NULL; + CgreenTest *test = (CgreenTest *)&test; + TestSuite *parent_suite = create_test_suite(); + TestSuite *first_suite, *second_suite; + TestItem test_item1 = {"", "TheFirstContext", "TheName"}; + TestItem test_item2 = {"", "TheSecondContext", "TheName"}; + + assert_that(suite_list, is_null); + + add_test_to_context(parent_suite, &suite_list, &test_item1, 
test); + first_suite = find_suite_for_context(suite_list, "TheFirstContext"); + assert_that(first_suite, is_non_null); + assert_that(first_suite->size, is_equal_to(1)); + + second_suite = find_suite_for_context(suite_list, "TheSecondContext"); + assert_that(second_suite, is_null); + + add_test_to_context(parent_suite, &suite_list, &test_item2, test); + assert_that(find_suite_for_context(suite_list, "TheFirstContext")->size, is_equal_to(1)); + assert_that(find_suite_for_context(suite_list, "TheSecondContext")->size, is_equal_to(1)); + + destroy_test_suite(parent_suite); + destroy_context_suites(suite_list); +} + +Ensure(Runner, can_sort_an_empty_list_of_tests) { + CgreenVector *test_items = create_cgreen_vector(NULL); + + test_items = sorted_test_items_from(test_items); + + assert_that(cgreen_vector_size(test_items) == 0); +} + +Ensure(Runner, can_sort_a_list_of_a_single_tests) { + TestItem test_item = { + (char *)"", (char *)"Context1", (char *)"Test1", + }; + CgreenVector *test_items = create_cgreen_vector(NULL); + cgreen_vector_add(test_items, &test_item); + + test_items = sorted_test_items_from(test_items); + assert_that(((TestItem *)cgreen_vector_get(test_items, 0))->test_name, + is_equal_to_string("Test1")); +} + +Ensure(Runner, can_sort_a_list_of_two_unordered_tests) { + TestItem test_items[] = { + {(char *)"", (char *)"Context1", (char *)"Test2"}, + {(char *)"", (char *)"Context1", (char *)"Test1"}, + }; + + CgreenVector *tests = create_cgreen_vector(NULL); + add_test_items_to_vector(test_items, tests, 2); + + tests = sorted_test_items_from(tests); + + assert_that(((TestItem *)cgreen_vector_get(tests, 0))->test_name, + is_equal_to_string("Test1")); + assert_that(((TestItem *)cgreen_vector_get(tests, 1))->test_name, + is_equal_to_string("Test2")); +} + +Ensure(Runner, can_sort_an_ordered_list_of_two_tests) { + TestItem test_item[] = { + {(char *)"", (char *)"Context1", (char *)"Test1"}, + {(char *)"", (char *)"Context1", (char *)"Test2"} + }; + + CgreenVector 
*test_items = create_cgreen_vector(NULL); + cgreen_vector_add(test_items, &test_item[0]); + cgreen_vector_add(test_items, &test_item[1]); + + test_items = sorted_test_items_from(test_items); + + assert_that(((TestItem *)cgreen_vector_get(test_items, 0))->test_name, + is_equal_to_string("Test1")); + assert_that(((TestItem *)cgreen_vector_get(test_items, 1))->test_name, + is_equal_to_string("Test2")); +} + +Ensure(Runner, can_sort_an_unordered_list_of_tests) { + TestItem unordered_test_items[] = { + {(char *)"", (char *)"Context1", (char *)"Test9"}, + {(char *)"", (char *)"Context1", (char *)"Test6"}, + {(char *)"", (char *)"Context1", (char *)"Test3"}, + {(char *)"", (char *)"Context1", (char *)"Test1"}, + {(char *)"", (char *)"Context1", (char *)"Test5"}, + {(char *)"", (char *)"Context1", (char *)"Test8"}, + {(char *)"", (char *)"Context1", (char *)"Test7"}, + {(char *)"", (char *)"Context1", (char *)"Test4"}, + {(char *)"", (char *)"Context1", (char *)"Test2"}, + }; + const char *expected_test_name[] = { + "Test1", "Test2", "Test3", "Test4", "Test5", "Test6", "Test7", "Test8", "Test9" }; + + CgreenVector *tests = create_cgreen_vector(NULL); + add_test_items_to_vector(unordered_test_items, tests, + sizeof(unordered_test_items)/sizeof(unordered_test_items[0])); + + tests = sorted_test_items_from(tests); + + for (int i=0; itest_name, + is_equal_to_string(expected_test_name[i])); +} + +/* vim: set ts=4 sw=4 et cindent: */ +/* Local variables: */ +/* tab-width: 4 */ +/* End: */ diff -Nru cgreen-1.3.0/tools/tests/CMakeLists.txt cgreen-1.6.3/tools/tests/CMakeLists.txt --- cgreen-1.3.0/tools/tests/CMakeLists.txt 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/tests/CMakeLists.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -include_directories( - ${CMAKE_CURRENT_SOURCE_DIR}/.. 
- ${CGREEN_PUBLIC_INCLUDE_DIRS} - ${CURRENT_BINARY_DIR}) - -set(CGREEN_RUNNER_TESTS_LIBRARY - cgreen_runner_tests - CACHE INTERNAL "cgreen-runner tests shared library" -) -set(RUNNER_TESTS_SRCS - runnerTests.c - ../discoverer.c - ../io.c - ../test_item.c) -add_library(${CGREEN_RUNNER_TESTS_LIBRARY} SHARED ${RUNNER_TESTS_SRCS}) - -target_link_libraries(${CGREEN_RUNNER_TESTS_LIBRARY} ${CGREEN_SHARED_LIBRARY} ${CMAKE_DL_LIBS}) - -SET(CGREEN_RUNNER_TESTS_LIBRARY "$/$") - -# Due to some (of many) CMake irregularities to reference the test libraries -# we can't just use its CMake name variable, but have to look it up with -# some special attributes of the library: -# $/$ -# -macro_add_test(NAME cgreen_runner_unit_tests - COMMAND cgreen-runner ${CGREEN_RUNNER_TESTS_LIBRARY}) - -macro_add_test(NAME cgreen_runner_usage - COMMAND cgreen-runner --help) - -macro_add_test(NAME cgreen_runner_quiet - COMMAND cgreen-runner -q ${CGREEN_RUNNER_TESTS_LIBRARY}) - -macro_add_test(NAME cgreen_runner_verbose - COMMAND cgreen-runner -v -C ${CGREEN_RUNNER_TESTS_LIBRARY}) - -macro_add_test(NAME cgreen_runner_version - COMMAND cgreen-runner --version) - -macro_add_test(NAME cgreen_runner_single_explicit_named_test - COMMAND cgreen-runner $/$ Runner:can_match_test_name) - -macro_add_test(NAME cgreen_runner_patternmatched_testnames - COMMAND cgreen-runner $/$ Runner:can*) - -macro_add_test(NAME cgreen_runner_patternmatched_testnames_in_patternmatched_context - COMMAND cgreen-runner $/$ Run*:can*) - -macro_add_test(NAME cgreen_runner_wildcarded_tests_in_named_context - COMMAND cgreen-runner $/$ Runner:*) - -macro_add_test(NAME cgreen_runner_wildcarded_tests_in_wildcarded_context - COMMAND cgreen-runner $/$ *:*) - -macro_add_test(NAME cgreen_runner_with_xml_reporter - COMMAND cgreen-runner --xml TEST --suite cgreen_runner_tests $/$) - -macro_add_test(NAME cgreen_runner_multiple_libraries - COMMAND cgreen-runner ${CGREEN_RUNNER_TESTS_LIBRARY} ${CGREEN_RUNNER_TESTS_LIBRARY} 
${CGREEN_RUNNER_TESTS_LIBRARY}) diff -Nru cgreen-1.3.0/tools/tests/runnerTests.c cgreen-1.6.3/tools/tests/runnerTests.c --- cgreen-1.3.0/tools/tests/runnerTests.c 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/tests/runnerTests.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -#include - -#include "utils.h" - -#ifdef __cplusplus -using namespace cgreen; -#endif - -#include "../runner.c" -#include "test_item.h" - -Describe(Runner); - -BeforeEach(Runner){} - -AfterEach(Runner){} - -#define CONTEXT_NAME "context" -#define TEST_NAME "test" - -#define STRINGIFY_X(x) #x -#define STRINGIFY(x) STRINGIFY_X(x) - -Ensure(Runner, can_get_context_name_of_name) { - char *context_name = context_name_of("Context:Test"); - assert_that(context_name, is_equal_to_string("Context")); - free(context_name); - - context_name = context_name_of("Test"); - assert_that(context_name, is_equal_to_string(CGREEN_DEFAULT_SUITE)); - free(context_name); -} - - -Ensure(Runner, can_get_test_name_of_symbolic_name) { - char *test_name = test_name_of("Context:Test"); - assert_that(test_name, is_equal_to_string("Test")); - free(test_name); - - test_name = test_name_of("Test"); - assert_that(test_name, is_equal_to_string("Test")); - free(test_name); -} - - -static void add_test_items_to_vector(TestItem items[], CgreenVector *test_items, int count) { - for (int i=0; i < count; i++) - cgreen_vector_add(test_items, &items[i]); -} - - -Ensure(Runner, can_ensure_test_exists_from_context_and_name) { - TestItem test_items[5] = { - {(char *)"", (char *)"Context1", (char *)"Test1"}, - {(char *)"", (char *)"Context1", (char *)"Test2"}, - {(char *)"", (char *)"Context2", (char *)"Test1"}, - {(char *)"", (char *)"Context2", (char *)"Test2"}}; - CgreenVector *tests = create_cgreen_vector(NULL); - add_test_items_to_vector(test_items, tests, 5); - - assert_that(matching_test_exists("Context1:Test1", tests)); -} - -Ensure(Runner, can_match_test_name) { - TestItem test_item = {(char *)"", (char 
*)"Context1", (char *)"Test1"}; - - assert_that(test_matches_pattern("Context1:Test1", &test_item)); - assert_that(test_matches_pattern("Context*:Test1", &test_item)); - assert_that(test_matches_pattern("*:Test1", &test_item)); - - assert_that(test_matches_pattern("Context*:Test1", &test_item)); - assert_that(test_matches_pattern("*:Test1", &test_item)); - - assert_that(test_matches_pattern("Context1:Test*", &test_item)); - assert_that(test_matches_pattern("Context*:Test*", &test_item)); - assert_that(test_matches_pattern("Context*:*", &test_item)); - assert_that(test_matches_pattern("*:Test*", &test_item)); - assert_that(test_matches_pattern("*:*", &test_item)); -} - -Ensure(Runner, can_add_test_to_the_suite_for_its_context) { - ContextSuite *suite_list = NULL; - CgreenTest *test = (CgreenTest *)&test; - TestSuite *parent_suite = create_test_suite(); - TestSuite *first_suite, *second_suite; - TestItem test_item1 = {"", "TheFirstContext", "TheName"}; - TestItem test_item2 = {"", "TheSecondContext", "TheName"}; - - assert_that(suite_list, is_null); - - add_test_to_context(parent_suite, &suite_list, &test_item1, test); - first_suite = find_suite_for_context(suite_list, "TheFirstContext"); - assert_that(first_suite, is_non_null); - assert_that(first_suite->size, is_equal_to(1)); - - second_suite = find_suite_for_context(suite_list, "TheSecondContext"); - assert_that(second_suite, is_null); - - add_test_to_context(parent_suite, &suite_list, &test_item2, test); - assert_that(find_suite_for_context(suite_list, "TheFirstContext")->size, is_equal_to(1)); - assert_that(find_suite_for_context(suite_list, "TheSecondContext")->size, is_equal_to(1)); - - destroy_test_suite(parent_suite); - destroy_context_suites(suite_list); -} - -Ensure(Runner, can_sort_an_empty_list_of_tests) { - CgreenVector *test_items = create_cgreen_vector(NULL); - - test_items = sorted_test_items_from(test_items); - - assert_that(cgreen_vector_size(test_items) == 0); -} - -Ensure(Runner, 
can_sort_a_list_of_a_single_tests) { - TestItem test_item = { - (char *)"", (char *)"Context1", (char *)"Test1", - }; - CgreenVector *test_items = create_cgreen_vector(NULL); - cgreen_vector_add(test_items, &test_item); - - test_items = sorted_test_items_from(test_items); - assert_that(((TestItem *)cgreen_vector_get(test_items, 0))->test_name, - is_equal_to_string("Test1")); -} - -Ensure(Runner, can_sort_a_list_of_two_unordered_tests) { - TestItem test_items[] = { - {(char *)"", (char *)"Context1", (char *)"Test2"}, - {(char *)"", (char *)"Context1", (char *)"Test1"}, - }; - - CgreenVector *tests = create_cgreen_vector(NULL); - add_test_items_to_vector(test_items, tests, 2); - - tests = sorted_test_items_from(tests); - - assert_that(((TestItem *)cgreen_vector_get(tests, 0))->test_name, - is_equal_to_string("Test1")); - assert_that(((TestItem *)cgreen_vector_get(tests, 1))->test_name, - is_equal_to_string("Test2")); -} - -Ensure(Runner, can_sort_an_ordered_list_of_two_tests) { - TestItem test_item[] = { - {(char *)"", (char *)"Context1", (char *)"Test1"}, - {(char *)"", (char *)"Context1", (char *)"Test2"} - }; - - CgreenVector *test_items = create_cgreen_vector(NULL); - cgreen_vector_add(test_items, &test_item[0]); - cgreen_vector_add(test_items, &test_item[1]); - - test_items = sorted_test_items_from(test_items); - - assert_that(((TestItem *)cgreen_vector_get(test_items, 0))->test_name, - is_equal_to_string("Test1")); - assert_that(((TestItem *)cgreen_vector_get(test_items, 1))->test_name, - is_equal_to_string("Test2")); -} - -Ensure(Runner, can_sort_an_unordered_list_of_tests) { - TestItem unordered_test_items[] = { - {(char *)"", (char *)"Context1", (char *)"Test9"}, - {(char *)"", (char *)"Context1", (char *)"Test6"}, - {(char *)"", (char *)"Context1", (char *)"Test3"}, - {(char *)"", (char *)"Context1", (char *)"Test1"}, - {(char *)"", (char *)"Context1", (char *)"Test5"}, - {(char *)"", (char *)"Context1", (char *)"Test8"}, - {(char *)"", (char *)"Context1", 
(char *)"Test7"}, - {(char *)"", (char *)"Context1", (char *)"Test4"}, - {(char *)"", (char *)"Context1", (char *)"Test2"}, - }; - const char *expected_test_name[] = { - "Test1", "Test2", "Test3", "Test4", "Test5", "Test6", "Test7", "Test8", "Test9" }; - - CgreenVector *tests = create_cgreen_vector(NULL); - add_test_items_to_vector(unordered_test_items, tests, - sizeof(unordered_test_items)/sizeof(unordered_test_items[0])); - - tests = sorted_test_items_from(tests); - - for (int i=0; itest_name, - is_equal_to_string(expected_test_name[i])); -} - -/* vim: set ts=4 sw=4 et cindent: */ -/* Local variables: */ -/* tab-width: 4 */ -/* End: */ diff -Nru cgreen-1.3.0/tools/windows-git-symlinks-to-cygwin cgreen-1.6.3/tools/windows-git-symlinks-to-cygwin --- cgreen-1.3.0/tools/windows-git-symlinks-to-cygwin 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/tools/windows-git-symlinks-to-cygwin 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -#!/bin/bash -# -# http://stackoverflow.com/a/38140374/204658 -# -# Script to convert symbolic links created by a windows git -# clone/checkout to cygwin links. When a typical Windows git checks -# out a symbolic link from the repo it (MsysGit, at least) tend to -# produce text files with flag 120000 and the link target file name as -# the content. Strategy would simply be to find all those and replace -# by cygwin links. -# -# This will work if you are not planning on commiting anything, e.g. 
-# in a Jenkins, or other CI, build environment - -for f in `git ls-files -s | awk '$1 == "120000" {print $4}'` -do - echo $f is a symlink pointing to $dir/$target - dir=$(dirname "${f}") - pushd "$dir" 2>&1 > /dev/null - file=$(basename "$f") - target=`cat "$file"` - rm "$file" - ln -s "$target" "$file" - popd 2>&1 > /dev/null -done diff -Nru cgreen-1.3.0/.travis.yml cgreen-1.6.3/.travis.yml --- cgreen-1.3.0/.travis.yml 2020-06-04 18:00:10.000000000 +0000 +++ cgreen-1.6.3/.travis.yml 2023-10-03 15:16:52.000000000 +0000 @@ -1,7 +1,12 @@ -sudo: false language: c +dist: focal + # Build matrix +arch: + - amd64 + - s390x + compiler: - gcc - clang @@ -16,6 +21,8 @@ - cmake - lcov - g++ + - valgrind + - libxml2-dev before_install: - if [[ $CC == gcc ]] ; then export CXX=g++ ; else export CXX=clang++ ; fi @@ -27,9 +34,9 @@ script: - mkdir -p build - cd build - - cmake -DCGREEN_WITH_UNIT_TESTS:BOOL=ON -DCGREEN_WITH_STATIC_LIBRARY:BOOL=$CGREEN_WITH_STATIC_LIBRARY -DCGREEN_INTERNAL_WITH_GCOV:BOOL=$WITH_GCOV .. - - make -j2 - - ctest --output-on-failure + - cmake -DCGREEN_WITH_XML:BOOL=OFF -DCGREEN_WITH_LIBXML2:BOOL=OFF -DCGREEN_WITH_UNIT_TESTS:BOOL=ON -DCGREEN_WITH_STATIC_LIBRARY:BOOL=$CGREEN_WITH_STATIC_LIBRARY -DCGREEN_INTERNAL_WITH_GCOV:BOOL=OFF .. && make -j2 && ctest --output-on-failure + - rm -f CMakeCache.txt + - cmake -DCGREEN_WITH_UNIT_TESTS:BOOL=ON -DCGREEN_WITH_STATIC_LIBRARY:BOOL=$CGREEN_WITH_STATIC_LIBRARY -DCGREEN_INTERNAL_WITH_GCOV:BOOL=$WITH_GCOV .. 
&& make -j2 && ctest --output-on-failure after_success: - if [ "$CC" = "gcc" ]; @@ -41,4 +48,6 @@ fi notifications: - slack: cgreen:EC3bZqwnyBsaqyeIndEewwNT + slack: + rooms: + secure: "Tobw+sqwGWscQo/mnBiO/+CQDMu8of5kXUhFKK1GblxTqxvJGGRVVvjt5ZlYaTyJD6YCzaVAO9hSjMvebnTTIqqP5WFmhpO3Du3khZJhFjzhqfjIUmK4S0rAMs3BBvLTGAUa0Hvnrt1VOkOJUdwdSW8nmnw9uFx3rVCthAOLkQugUY4s8tCc/QHBJPUSqdKntTq9e4bapaMl4gREPFvtneOyN7LQp0JtI/nyrdTVlONaqkPv7M8sEC7a6ec6RK/AiEurp8XczNW0ifTwqCg2y/sLfnRg+wzG1H9sMmzs+Kx7akcGdGzi/rqWRnBDF3mKL4pt8YhmBy/znDnMAR559puRiyz6ZZhlBKRviZrsfQmC0F3IyTZY/S3SjvatT9DrAIsHDLbDNxRxzO0GC+cpkq4HU8VnF4rQ31VVb75igMHzgw3SvDOqsZ3g0jPiBybEi0jVRUzmDWrw7p6qtl5hwQam6dxfmO6xXpwOTqPI9WQEOZeZfOw7eIeEbpB5wJ723z2cEwutoDdConIZElJBKjcjFp5rjR/yG94H/0OEgCmC/JRrdd71BB9vPh4SO40RvKkxV0adY7Xk2vkAxwLFhxS7QIdNfyQBFFEBnR+JFBHE+9XwEH1JmaAbrSYgg0+vVUZPOuqcqvz+9PUTlAb5rqYB97XEbF//5t8URrriuB0="